Merge "ART: make RegionSpace::GetNextObject a static function"
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
index 0ec0a15..193a3c1 100644
--- a/build/apex/Android.bp
+++ b/build/apex/Android.bp
@@ -18,12 +18,16 @@
"libopenjdkjvm",
"libopenjdkjvmti",
"libadbconnection",
+ "libjavacrypto",
]
bionic_native_shared_libs = [
"libc",
"libm",
"libdl",
]
+bionic_binaries_both = [
+ "linker",
+]
// - Fake library that avoids namespace issues and gives some warnings for nosy apps.
art_runtime_fake_native_shared_libs = [
// FIXME: Does not work as-is, because `libart_fake` is defined in libart_fake/Android.mk,
@@ -51,6 +55,7 @@
"libopenjdkjvmd",
"libopenjdkjvmtid",
"libadbconnectiond",
+ "libjavacrypto",
]
// Files associated with bionic / managed core library time zone APIs.
@@ -113,7 +118,8 @@
both: {
// TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
// (see `symlink_preferred_arch` in art/dalvikvm/Android.bp).
- binaries: art_runtime_base_binaries_both,
+ binaries: art_runtime_base_binaries_both
+ + bionic_binaries_both,
},
prefer32: {
binaries: art_runtime_base_binaries_prefer32,
@@ -142,7 +148,8 @@
both: {
// TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
// (see `symlink_preferred_arch` in art/dalvikvm/Android.bp).
- binaries: art_runtime_base_binaries_both,
+ binaries: art_runtime_base_binaries_both
+ + bionic_binaries_both,
},
prefer32: {
binaries: art_runtime_base_binaries_prefer32
diff --git a/build/apex/ld.config.txt b/build/apex/ld.config.txt
index ac4d1eb..014b115 100644
--- a/build/apex/ld.config.txt
+++ b/build/apex/ld.config.txt
@@ -1 +1,31 @@
-# TODO: Write me.
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Bionic loader config file for the Runtime APEX.
+#
+# There are no versioned APEX paths here - this APEX module does not support
+# having several versions mounted.
+
+dir.runtime = /apex/com.android.runtime/bin/
+
+[runtime]
+additional.namespaces = platform
+
+# Keep in sync with runtime namespace in /system/etc/ld.config.txt.
+namespace.default.isolated = true
+namespace.default.search.paths = /apex/com.android.runtime/${LIB}
+# odex files are in /system/framework. dalvikvm has to be able to dlopen the
+# files for CTS.
+namespace.default.permitted.paths = /system/framework
+namespace.default.links = platform
+# TODO(b/119867084): Restrict fallback to platform namespace to PALette library.
+namespace.default.link.platform.allow_all_shared_libs = true
+
+# Keep in sync with default namespace in /system/etc/ld.config.txt.
+namespace.platform.isolated = true
+namespace.platform.search.paths = /system/${LIB}
+namespace.platform.links = default
+namespace.platform.link.default.shared_libs = libc.so:libdl.so:libm.so
+namespace.platform.link.default.shared_libs += libart.so:libartd.so
+namespace.platform.link.default.shared_libs += libnativebridge.so
+namespace.platform.link.default.shared_libs += libnativehelper.so
+namespace.platform.link.default.shared_libs += libnativeloader.so
diff --git a/build/art.go b/build/art.go
index 01848c8..22f6410 100644
--- a/build/art.go
+++ b/build/art.go
@@ -282,7 +282,7 @@
func init() {
android.RegisterModuleType("art_cc_library", artLibrary)
- android.RegisterModuleType("art_cc_static_library", artStaticLibrary)
+ android.RegisterModuleType("art_cc_library_static", artStaticLibrary)
android.RegisterModuleType("art_cc_binary", artBinary)
android.RegisterModuleType("art_cc_test", artTest)
android.RegisterModuleType("art_cc_test_library", artTestLibrary)
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index 581edaa..658bdb3 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -53,13 +53,13 @@
dwarf::WriteCIE(is64bit, dwarf::Reg(8), initial_opcodes, kCFIFormat, &debug_frame_data_);
std::vector<uintptr_t> debug_frame_patches;
dwarf::WriteFDE(is64bit,
- /* section_address */ 0,
- /* cie_address */ 0,
- /* code_address */ 0,
+ /* section_address= */ 0,
+ /* cie_address= */ 0,
+ /* code_address= */ 0,
actual_asm.size(),
actual_cfi,
kCFIFormat,
- /* buffer_address */ 0,
+ /* buffer_address= */ 0,
&debug_frame_data_,
&debug_frame_patches);
ReformatCfi(Objdump(false, "-W"), &lines);
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index be6da71..07c73c9 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -197,7 +197,7 @@
compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
compiler_kind_,
number_of_threads_,
- /* swap_fd */ -1));
+ /* swap_fd= */ -1));
}
void CommonCompilerTest::SetUpRuntimeOptions(RuntimeOptions* options) {
@@ -264,7 +264,7 @@
Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
Handle<mirror::ClassLoader> h_class_loader = hs.NewHandle(
self->DecodeJObject(class_loader)->AsClassLoader());
- const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
+ const dex::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
std::vector<const DexFile*> dex_files;
dex_files.push_back(dex_file);
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 75790c9..e92777f 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -136,7 +136,7 @@
// This affects debug information generated at link time.
void MarkAsIntrinsic() {
DCHECK(!IsIntrinsic());
- SetPackedField<IsIntrinsicField>(/* value */ true);
+ SetPackedField<IsIntrinsicField>(/* value= */ true);
}
ArrayRef<const uint8_t> GetVmapTable() const;
diff --git a/compiler/compiler.cc b/compiler/compiler.cc
index 646040f..54da446 100644
--- a/compiler/compiler.cc
+++ b/compiler/compiler.cc
@@ -21,6 +21,7 @@
#include "base/macros.h"
#include "base/utils.h"
#include "dex/code_item_accessors-inl.h"
+#include "dex/dex_file.h"
#include "driver/compiler_driver.h"
#include "optimizing/optimizing_compiler.h"
@@ -39,7 +40,7 @@
}
}
-bool Compiler::IsPathologicalCase(const DexFile::CodeItem& code_item,
+bool Compiler::IsPathologicalCase(const dex::CodeItem& code_item,
uint32_t method_idx,
const DexFile& dex_file) {
/*
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 8c07773..8a67724 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -19,10 +19,13 @@
#include "base/mutex.h"
#include "base/os.h"
-#include "dex/dex_file.h"
+#include "dex/invoke_type.h"
namespace art {
+namespace dex {
+struct CodeItem;
+} // namespace dex
namespace jit {
class JitCodeCache;
class JitLogger;
@@ -35,6 +38,7 @@
class ArtMethod;
class CompilerDriver;
class CompiledMethod;
+class DexFile;
template<class T> class Handle;
class OatWriter;
class Thread;
@@ -54,7 +58,7 @@
virtual bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const = 0;
- virtual CompiledMethod* Compile(const DexFile::CodeItem* code_item,
+ virtual CompiledMethod* Compile(const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -102,7 +106,7 @@
// Returns whether the method to compile is such a pathological case that
// it's not worth compiling.
- static bool IsPathologicalCase(const DexFile::CodeItem& code_item,
+ static bool IsPathologicalCase(const dex::CodeItem& code_item,
uint32_t method_idx,
const DexFile& dex_file);
diff --git a/compiler/debug/dwarf/dwarf_test.cc b/compiler/debug/dwarf/dwarf_test.cc
index 933034f..6512314 100644
--- a/compiler/debug/dwarf/dwarf_test.cc
+++ b/compiler/debug/dwarf/dwarf_test.cc
@@ -334,7 +334,7 @@
std::vector<uintptr_t> debug_info_patches;
std::vector<uintptr_t> expected_patches = { 16, 20, 29, 33, 42, 46 };
- dwarf::WriteDebugInfoCU(0 /* debug_abbrev_offset */, info,
+ dwarf::WriteDebugInfoCU(/* debug_abbrev_offset= */ 0, info,
0, &debug_info_data_, &debug_info_patches);
EXPECT_EQ(expected_patches, debug_info_patches);
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index bb550b3..a63f241 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -152,9 +152,9 @@
DCHECK(mi->dex_file != nullptr);
const DexFile* dex = mi->dex_file;
CodeItemDebugInfoAccessor accessor(*dex, mi->code_item, mi->dex_method_index);
- const DexFile::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index);
- const DexFile::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method);
- const DexFile::TypeList* dex_params = dex->GetProtoParameters(dex_proto);
+ const dex::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index);
+ const dex::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method);
+ const dex::TypeList* dex_params = dex->GetProtoParameters(dex_proto);
const char* dex_class_desc = dex->GetMethodDeclaringClassDescriptor(dex_method);
const bool is_static = (mi->access_flags & kAccStatic) != 0;
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index 56d773f..16f163b 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -48,13 +48,13 @@
dwarf::CFIFormat cfi_format,
bool write_oat_patches) {
// Write .strtab and .symtab.
- WriteDebugSymbols(builder, false /* mini-debug-info */, debug_info);
+ WriteDebugSymbols(builder, /* mini-debug-info= */ false, debug_info);
// Write .debug_frame.
WriteCFISection(builder, debug_info.compiled_methods, cfi_format, write_oat_patches);
// Group the methods into compilation units based on class.
- std::unordered_map<const DexFile::ClassDef*, ElfCompilationUnit> class_to_compilation_unit;
+ std::unordered_map<const dex::ClassDef*, ElfCompilationUnit> class_to_compilation_unit;
for (const MethodDebugInfo& mi : debug_info.compiled_methods) {
if (mi.dex_file != nullptr) {
auto& dex_class_def = mi.dex_file->GetClassDef(mi.class_def_index);
@@ -125,17 +125,17 @@
linker::VectorOutputStream out("Mini-debug-info ELF file", &buffer);
std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
new linker::ElfBuilder<ElfTypes>(isa, features, &out));
- builder->Start(false /* write_program_headers */);
+ builder->Start(/* write_program_headers= */ false);
// Mirror ELF sections as NOBITS since the added symbols will reference them.
builder->GetText()->AllocateVirtualMemory(text_section_address, text_section_size);
if (dex_section_size != 0) {
builder->GetDex()->AllocateVirtualMemory(dex_section_address, dex_section_size);
}
- WriteDebugSymbols(builder.get(), true /* mini-debug-info */, debug_info);
+ WriteDebugSymbols(builder.get(), /* mini-debug-info= */ true, debug_info);
WriteCFISection(builder.get(),
debug_info.compiled_methods,
dwarf::DW_DEBUG_FRAME_FORMAT,
- false /* write_oat_paches */);
+ /* write_oat_patches= */ false);
builder->End();
CHECK(builder->Good());
std::vector<uint8_t> compressed_buffer;
@@ -187,21 +187,21 @@
std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
new linker::ElfBuilder<ElfTypes>(isa, features, &out));
// No program headers since the ELF file is not linked and has no allocated sections.
- builder->Start(false /* write_program_headers */);
+ builder->Start(/* write_program_headers= */ false);
builder->GetText()->AllocateVirtualMemory(method_info.code_address, method_info.code_size);
if (mini_debug_info) {
// The compression is great help for multiple methods but it is not worth it for a
// single method due to the overheads so skip the compression here for performance.
- WriteDebugSymbols(builder.get(), true /* mini-debug-info */, debug_info);
+ WriteDebugSymbols(builder.get(), /* mini-debug-info= */ true, debug_info);
WriteCFISection(builder.get(),
debug_info.compiled_methods,
dwarf::DW_DEBUG_FRAME_FORMAT,
- false /* write_oat_paches */);
+ /* write_oat_patches= */ false);
} else {
WriteDebugInfo(builder.get(),
debug_info,
dwarf::DW_DEBUG_FRAME_FORMAT,
- false /* write_oat_patches */);
+ /* write_oat_patches= */ false);
}
builder->End();
CHECK(builder->Good());
@@ -359,12 +359,12 @@
std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
new linker::ElfBuilder<ElfTypes>(isa, features, &out));
// No program headers since the ELF file is not linked and has no allocated sections.
- builder->Start(false /* write_program_headers */);
+ builder->Start(/* write_program_headers= */ false);
ElfDebugInfoWriter<ElfTypes> info_writer(builder.get());
info_writer.Start();
ElfCompilationUnitWriter<ElfTypes> cu_writer(&info_writer);
cu_writer.Write(types);
- info_writer.End(false /* write_oat_patches */);
+ info_writer.End(/* write_oat_patches= */ false);
builder->End();
CHECK(builder->Good());
diff --git a/compiler/debug/method_debug_info.h b/compiler/debug/method_debug_info.h
index 729c403..152db6e 100644
--- a/compiler/debug/method_debug_info.h
+++ b/compiler/debug/method_debug_info.h
@@ -32,7 +32,7 @@
size_t class_def_index;
uint32_t dex_method_index;
uint32_t access_flags;
- const DexFile::CodeItem* code_item;
+ const dex::CodeItem* code_item;
InstructionSet isa;
bool deduped;
bool is_native_debuggable;
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index c124ef5..23ce37e 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -473,7 +473,7 @@
method_idx,
unit_.GetDexCache(),
unit_.GetClassLoader(),
- /* referrer */ nullptr,
+ /* referrer= */ nullptr,
kVirtual);
if (UNLIKELY(resolved_method == nullptr)) {
@@ -505,7 +505,7 @@
}
CompiledMethod* DexToDexCompiler::CompileMethod(
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type ATTRIBUTE_UNUSED,
uint16_t class_def_idx,
@@ -627,11 +627,11 @@
void DexToDexCompiler::SetDexFiles(const std::vector<const DexFile*>& dex_files) {
// Record what code items are already seen to detect when multiple methods have the same code
// item.
- std::unordered_set<const DexFile::CodeItem*> seen_code_items;
+ std::unordered_set<const dex::CodeItem*> seen_code_items;
for (const DexFile* dex_file : dex_files) {
for (ClassAccessor accessor : dex_file->GetClasses()) {
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
- const DexFile::CodeItem* code_item = method.GetCodeItem();
+ const dex::CodeItem* code_item = method.GetCodeItem();
// Detect the shared code items.
if (!seen_code_items.insert(code_item).second) {
shared_code_items_.insert(code_item);
@@ -646,7 +646,7 @@
MutexLock mu(Thread::Current(), lock_);
size_t unquicken_count = 0;
for (const auto& pair : shared_code_item_quicken_info_) {
- const DexFile::CodeItem* code_item = pair.first;
+ const dex::CodeItem* code_item = pair.first;
const QuickenState& state = pair.second;
CHECK_GE(state.methods_.size(), 1u);
if (state.conflict_) {
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
index 7253488..78309ae 100644
--- a/compiler/dex/dex_to_dex_compiler.h
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -23,7 +23,6 @@
#include "base/bit_vector.h"
#include "base/mutex.h"
-#include "dex/dex_file.h"
#include "dex/invoke_type.h"
#include "dex/method_reference.h"
#include "handle.h"
@@ -34,6 +33,11 @@
class CompiledMethod;
class CompilerDriver;
class DexCompilationUnit;
+class DexFile;
+
+namespace dex {
+struct CodeItem;
+} // namespace dex
namespace mirror {
class ClassLoader;
@@ -50,7 +54,7 @@
explicit DexToDexCompiler(CompilerDriver* driver);
- CompiledMethod* CompileMethod(const DexFile::CodeItem* code_item,
+ CompiledMethod* CompileMethod(const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -105,9 +109,9 @@
std::unordered_map<const DexFile*, BitVector> should_quicken_;
// Guarded by lock_ during writing, accessed without a lock during quickening.
// This is safe because no thread is adding to the shared code items during the quickening phase.
- std::unordered_set<const DexFile::CodeItem*> shared_code_items_;
+ std::unordered_set<const dex::CodeItem*> shared_code_items_;
// Blacklisted code items are unquickened in UnquickenConflictingMethods.
- std::unordered_map<const DexFile::CodeItem*, QuickenState> shared_code_item_quicken_info_
+ std::unordered_map<const dex::CodeItem*, QuickenState> shared_code_item_quicken_info_
GUARDED_BY(lock_);
// Number of added code items.
size_t num_code_items_ GUARDED_BY(lock_) = 0u;
diff --git a/compiler/dex/dex_to_dex_decompiler_test.cc b/compiler/dex/dex_to_dex_decompiler_test.cc
index b055416..1f04546 100644
--- a/compiler/dex/dex_to_dex_decompiler_test.cc
+++ b/compiler/dex/dex_to_dex_decompiler_test.cc
@@ -95,7 +95,7 @@
optimizer::ArtDecompileDEX(*updated_dex_file,
*accessor.GetCodeItem(method),
table,
- /* decompile_return_instruction */ true);
+ /* decompile_return_instruction= */ true);
}
}
diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc
index 183173b..b0f025d 100644
--- a/compiler/dex/inline_method_analyser.cc
+++ b/compiler/dex/inline_method_analyser.cc
@@ -216,7 +216,7 @@
DCHECK(IsInstructionIPut(new_iput->Opcode()));
uint32_t field_index = new_iput->VRegC_22c();
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- ArtField* field = class_linker->LookupResolvedField(field_index, method, /* is_static */ false);
+ ArtField* field = class_linker->LookupResolvedField(field_index, method, /* is_static= */ false);
if (UNLIKELY(field == nullptr)) {
return false;
}
@@ -228,7 +228,7 @@
}
ArtField* f = class_linker->LookupResolvedField(iputs[old_pos].field_index,
method,
- /* is_static */ false);
+ /* is_static= */ false);
DCHECK(f != nullptr);
if (f == field) {
auto back_it = std::copy(iputs + old_pos + 1, iputs + arraysize(iputs), iputs + old_pos);
@@ -511,7 +511,7 @@
}
bool InlineMethodAnalyser::IsSyntheticAccessor(MethodReference ref) {
- const DexFile::MethodId& method_id = ref.dex_file->GetMethodId(ref.index);
+ const dex::MethodId& method_id = ref.dex_file->GetMethodId(ref.index);
const char* method_name = ref.dex_file->GetMethodName(method_id);
// javac names synthetic accessors "access$nnn",
// jack names them "-getN", "-putN", "-wrapN".
@@ -713,7 +713,7 @@
}
ObjPtr<mirror::DexCache> dex_cache = method->GetDexCache();
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- ArtField* field = class_linker->LookupResolvedField(field_idx, method, /* is_static */ false);
+ ArtField* field = class_linker->LookupResolvedField(field_idx, method, /* is_static= */ false);
if (field == nullptr || field->IsStatic()) {
return false;
}
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 5a34efb..6bd5fe8 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -112,7 +112,7 @@
// which have no verifier error, nor has methods that we know will throw
// at runtime.
std::unique_ptr<VerifiedMethod> verified_method = std::make_unique<VerifiedMethod>(
- /* encountered_error_types */ 0, /* has_runtime_throw */ false);
+ /* encountered_error_types= */ 0, /* has_runtime_throw= */ false);
if (atomic_verified_methods_.Insert(ref,
/*expected*/ nullptr,
verified_method.get()) ==
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index f2da3ff..54f216a 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -82,7 +82,7 @@
method_verifier->ResolveCheckedClass(dex::TypeIndex(inst.VRegB_21c()));
// Pass null for the method verifier to not record the VerifierDeps dependency
// if the types are not assignable.
- if (cast_type.IsStrictlyAssignableFrom(reg_type, /* method_verifier */ nullptr)) {
+ if (cast_type.IsStrictlyAssignableFrom(reg_type, /* verifier= */ nullptr)) {
// The types are assignable, we record that dependency in the VerifierDeps so
// that if this changes after OTA, we will re-verify again.
// We check if reg_type has a class, as the verifier may have inferred it's
@@ -92,8 +92,8 @@
verifier::VerifierDeps::MaybeRecordAssignability(method_verifier->GetDexFile(),
cast_type.GetClass(),
reg_type.GetClass(),
- /* strict */ true,
- /* assignable */ true);
+ /* is_strict= */ true,
+ /* is_assignable= */ true);
}
if (safe_cast_set_ == nullptr) {
safe_cast_set_.reset(new SafeCastSet());
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 9fac2bc..05eacd8 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -23,7 +23,7 @@
namespace art {
TEST(CompiledMethodStorage, Deduplicate) {
- CompiledMethodStorage storage(/* swap_fd */ -1);
+ CompiledMethodStorage storage(/* swap_fd= */ -1);
ASSERT_TRUE(storage.DedupeEnabled()); // The default.
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 63dcb46..ec2e38b 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -57,7 +57,7 @@
const DexCompilationUnit* mUnit) {
DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
- const DexFile::MethodId& referrer_method_id =
+ const dex::MethodId& referrer_method_id =
mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
return ResolveClass(soa, dex_cache, class_loader, referrer_method_id.class_idx_, mUnit);
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 0039be0..e440eec 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -112,19 +112,7 @@
class CompilerDriver::AOTCompilationStats {
public:
AOTCompilationStats()
- : stats_lock_("AOT compilation statistics lock"),
- resolved_instance_fields_(0), unresolved_instance_fields_(0),
- resolved_local_static_fields_(0), resolved_static_fields_(0), unresolved_static_fields_(0),
- type_based_devirtualization_(0),
- safe_casts_(0), not_safe_casts_(0) {
- for (size_t i = 0; i <= kMaxInvokeType; i++) {
- resolved_methods_[i] = 0;
- unresolved_methods_[i] = 0;
- virtual_made_direct_[i] = 0;
- direct_calls_to_boot_[i] = 0;
- direct_methods_to_boot_[i] = 0;
- }
- }
+ : stats_lock_("AOT compilation statistics lock") {}
void Dump() {
DumpStat(resolved_instance_fields_, unresolved_instance_fields_, "instance fields resolved");
@@ -141,6 +129,16 @@
type_based_devirtualization_,
"virtual/interface calls made direct based on type information");
+ const size_t total = std::accumulate(
+ class_status_count_,
+ class_status_count_ + static_cast<size_t>(ClassStatus::kLast) + 1,
+ 0u);
+ for (size_t i = 0; i <= static_cast<size_t>(ClassStatus::kLast); ++i) {
+ std::ostringstream oss;
+ oss << "classes with status " << static_cast<ClassStatus>(i);
+ DumpStat(class_status_count_[i], total - class_status_count_[i], oss.str().c_str());
+ }
+
for (size_t i = 0; i <= kMaxInvokeType; i++) {
std::ostringstream oss;
oss << static_cast<InvokeType>(i) << " methods were AOT resolved";
@@ -219,26 +217,34 @@
not_safe_casts_++;
}
+ // Register a class status.
+ void AddClassStatus(ClassStatus status) REQUIRES(!stats_lock_) {
+ STATS_LOCK();
+ ++class_status_count_[static_cast<size_t>(status)];
+ }
+
private:
Mutex stats_lock_;
- size_t resolved_instance_fields_;
- size_t unresolved_instance_fields_;
+ size_t resolved_instance_fields_ = 0u;
+ size_t unresolved_instance_fields_ = 0u;
- size_t resolved_local_static_fields_;
- size_t resolved_static_fields_;
- size_t unresolved_static_fields_;
+ size_t resolved_local_static_fields_ = 0u;
+ size_t resolved_static_fields_ = 0u;
+ size_t unresolved_static_fields_ = 0u;
// Type based devirtualization for invoke interface and virtual.
- size_t type_based_devirtualization_;
+ size_t type_based_devirtualization_ = 0u;
- size_t resolved_methods_[kMaxInvokeType + 1];
- size_t unresolved_methods_[kMaxInvokeType + 1];
- size_t virtual_made_direct_[kMaxInvokeType + 1];
- size_t direct_calls_to_boot_[kMaxInvokeType + 1];
- size_t direct_methods_to_boot_[kMaxInvokeType + 1];
+ size_t resolved_methods_[kMaxInvokeType + 1] = {};
+ size_t unresolved_methods_[kMaxInvokeType + 1] = {};
+ size_t virtual_made_direct_[kMaxInvokeType + 1] = {};
+ size_t direct_calls_to_boot_[kMaxInvokeType + 1] = {};
+ size_t direct_methods_to_boot_[kMaxInvokeType + 1] = {};
- size_t safe_casts_;
- size_t not_safe_casts_;
+ size_t safe_casts_ = 0u;
+ size_t not_safe_casts_ = 0u;
+
+ size_t class_status_count_[static_cast<size_t>(ClassStatus::kLast) + 1] = {};
DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats);
};
@@ -338,7 +344,7 @@
static optimizer::DexToDexCompiler::CompilationLevel GetDexToDexCompilationLevel(
Thread* self, const CompilerDriver& driver, Handle<mirror::ClassLoader> class_loader,
- const DexFile& dex_file, const DexFile::ClassDef& class_def)
+ const DexFile& dex_file, const dex::ClassDef& class_def)
REQUIRES_SHARED(Locks::mutator_lock_) {
// When the dex file is uncompressed in the APK, we do not generate a copy in the .vdex
// file. As a result, dex2oat will map the dex file read-only, and we only need to check
@@ -383,7 +389,7 @@
const CompilerDriver& driver,
jobject jclass_loader,
const DexFile& dex_file,
- const DexFile::ClassDef& class_def) {
+ const dex::ClassDef& class_def) {
ScopedObjectAccess soa(self);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
@@ -410,7 +416,7 @@
static void CompileMethodHarness(
Thread* self,
CompilerDriver* driver,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -459,7 +465,7 @@
static void CompileMethodDex2Dex(
Thread* self,
CompilerDriver* driver,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -470,7 +476,7 @@
Handle<mirror::DexCache> dex_cache) {
auto dex_2_dex_fn = [](Thread* self ATTRIBUTE_UNUSED,
CompilerDriver* driver,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -521,7 +527,7 @@
static void CompileMethodQuick(
Thread* self,
CompilerDriver* driver,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -533,7 +539,7 @@
auto quick_fn = [](
Thread* self,
CompilerDriver* driver,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -644,7 +650,7 @@
uint32_t method_idx,
uint32_t access_flags,
InvokeType invoke_type,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> h_class_loader) {
// Can we run DEX-to-DEX compiler on this class ?
@@ -802,7 +808,7 @@
ObjPtr<mirror::Class> klass =
class_linker->LookupResolvedType(type_index,
dex_cache.Get(),
- /* class_loader */ nullptr);
+ /* class_loader= */ nullptr);
CHECK(klass != nullptr) << descriptor << " should have been previously resolved.";
// Now assign the bitstring if the class is not final. Keep this in sync with sharpening.
if (!klass->IsFinal()) {
@@ -1119,7 +1125,7 @@
ScopedNullHandle<mirror::ClassLoader>())
: nullptr;
if (klass == nullptr) {
- const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
+ const dex::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
const char* descriptor = dex_file->GetTypeDescriptor(type_id);
LOG(FATAL) << "Failed to resolve class " << descriptor;
}
@@ -1191,7 +1197,7 @@
// Visitor for VisitReferences.
void operator()(ObjPtr<mirror::Object> object,
MemberOffset field_offset,
- bool /* is_static */) const
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
if (ref != nullptr) {
@@ -1231,8 +1237,15 @@
bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
StringPiece name(klass->GetDescriptor(&temp));
- if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
- data_->image_classes_.push_back(hs_.NewHandle(klass));
+ auto it = data_->image_class_descriptors_->find(name);
+ if (it != data_->image_class_descriptors_->end()) {
+ if (LIKELY(klass->IsResolved())) {
+ data_->image_classes_.push_back(hs_.NewHandle(klass));
+ } else {
+ DCHECK(klass->IsErroneousUnresolved());
+ VLOG(compiler) << "Removing unresolved class from image classes: " << name;
+ data_->image_class_descriptors_->erase(it);
+ }
} else {
// Check whether it is initialized and has a clinit. They must be kept, too.
if (klass->IsInitialized() && klass->FindClassInitializer(
@@ -1354,7 +1367,7 @@
Handle<mirror::DexCache> dex_cache(mUnit->GetDexCache());
{
Handle<mirror::ClassLoader> class_loader = mUnit->GetClassLoader();
- resolved_field = ResolveField(soa, dex_cache, class_loader, field_idx, /* is_static */ false);
+ resolved_field = ResolveField(soa, dex_cache, class_loader, field_idx, /* is_static= */ false);
referrer_class = resolved_field != nullptr
? ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit) : nullptr;
}
@@ -1583,7 +1596,7 @@
// needs it, here we try to resolve fields and methods used in class
// definitions, since many of them many never be referenced by
// generated code.
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
ScopedObjectAccess soa(self);
StackHandleScope<2> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
@@ -1885,7 +1898,7 @@
ScopedTrace trace(__FUNCTION__);
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *manager_->GetDexFile();
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = manager_->GetClassLinker();
jobject jclass_loader = manager_->GetClassLoader();
@@ -2019,7 +2032,7 @@
ScopedTrace trace(__FUNCTION__);
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *manager_->GetDexFile();
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = manager_->GetClassLinker();
jobject jclass_loader = manager_->GetClassLoader();
@@ -2084,8 +2097,8 @@
ScopedTrace trace(__FUNCTION__);
jobject jclass_loader = manager_->GetClassLoader();
const DexFile& dex_file = *manager_->GetDexFile();
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const dex::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_);
const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
ScopedObjectAccess soa(Thread::Current());
@@ -2095,8 +2108,11 @@
Handle<mirror::Class> klass(
hs.NewHandle(manager_->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
- if (klass != nullptr && !SkipClass(manager_->GetClassLoader(), dex_file, klass.Get())) {
- TryInitializeClass(klass, class_loader);
+ if (klass != nullptr) {
+ if (!SkipClass(manager_->GetClassLoader(), dex_file, klass.Get())) {
+ TryInitializeClass(klass, class_loader);
+ }
+ manager_->GetCompiler()->stats_->AddClassStatus(klass->GetStatus());
}
// Clear any class not found or verification exceptions.
soa.Self()->ClearException();
@@ -2106,8 +2122,8 @@
void TryInitializeClass(Handle<mirror::Class> klass, Handle<mirror::ClassLoader>& class_loader)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass->GetDexFile();
- const DexFile::ClassDef* class_def = klass->GetClassDef();
- const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def->class_idx_);
+ const dex::ClassDef* class_def = klass->GetClassDef();
+ const dex::TypeId& class_type_id = dex_file.GetTypeId(class_def->class_idx_);
const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
ScopedObjectAccessUnchecked soa(Thread::Current());
StackHandleScope<3> hs(soa.Self());
@@ -2262,7 +2278,7 @@
StackHandleScope<1> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache = hs.NewHandle(klass->GetDexCache());
- const DexFile::ClassDef* class_def = klass->GetClassDef();
+ const dex::ClassDef* class_def = klass->GetClassDef();
ClassLinker* class_linker = manager_->GetClassLinker();
// Check encoded final field values for strings and intern.
@@ -2304,7 +2320,7 @@
self->ClearException();
return false;
}
- const DexFile::TypeList* types = m->GetParameterTypeList();
+ const dex::TypeList* types = m->GetParameterTypeList();
if (types != nullptr) {
for (uint32_t i = 0; i < types->Size(); ++i) {
dex::TypeIndex param_type_idx = types->GetTypeItem(i).type_idx_;
@@ -2531,7 +2547,7 @@
}
if (GetCompilerOptions().IsBootImage()) {
// Prune garbage objects created during aborted transactions.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ true);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ true);
}
}
@@ -2559,7 +2575,7 @@
ClassLinker* class_linker = context.GetClassLinker();
jobject jclass_loader = context.GetClassLoader();
ClassReference ref(&dex_file, class_def_index);
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
ClassAccessor accessor(dex_file, class_def_index);
CompilerDriver* const driver = context.GetCompiler();
// Skip compiling classes with generic verifier failures since they will still fail at runtime
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 7c0fc64..6f8ec12 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -36,7 +36,6 @@
#include "class_status.h"
#include "compiler.h"
#include "dex/class_reference.h"
-#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
#include "dex/dex_to_dex_compiler.h"
#include "dex/method_reference.h"
@@ -47,6 +46,10 @@
namespace art {
+namespace dex {
+struct CodeItem;
+} // namespace dex
+
namespace mirror {
class Class;
class DexCache;
@@ -62,6 +65,7 @@
class CompiledMethod;
class CompilerOptions;
class DexCompilationUnit;
+class DexFile;
template<class T> class Handle;
struct InlineIGetIPutData;
class InstructionSetFeatures;
@@ -127,7 +131,7 @@
uint32_t method_idx,
uint32_t access_flags,
InvokeType invoke_type,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> h_class_loader)
REQUIRES(!Locks::mutator_lock_);
@@ -378,6 +382,7 @@
friend class CommonCompilerTest;
friend class CompileClassVisitor;
friend class DexToDexDecompilerTest;
+ friend class InitializeClassVisitor;
friend class verifier::VerifierDepsTest;
DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
};
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index b924129..e73d072 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -80,7 +80,7 @@
void MakeDexFileExecutable(jobject class_loader, const DexFile& dex_file) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
for (size_t i = 0; i < dex_file.NumClassDefs(); i++) {
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(i);
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc
index e5a6f0e..0d0f074 100644
--- a/compiler/driver/dex_compilation_unit.cc
+++ b/compiler/driver/dex_compilation_unit.cc
@@ -30,7 +30,7 @@
DexCompilationUnit::DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
ClassLinker* class_linker,
const DexFile& dex_file,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint16_t class_def_idx,
uint32_t method_idx,
uint32_t access_flags,
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index 757f0e7..f68d93f 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -39,7 +39,7 @@
DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
ClassLinker* class_linker,
const DexFile& dex_file,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint16_t class_def_idx,
uint32_t method_idx,
uint32_t access_flags,
@@ -67,17 +67,17 @@
return dex_method_idx_;
}
- const DexFile::CodeItem* GetCodeItem() const {
+ const dex::CodeItem* GetCodeItem() const {
return code_item_;
}
const char* GetShorty() const {
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+ const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
return dex_file_->GetMethodShorty(method_id);
}
const char* GetShorty(uint32_t* shorty_len) const {
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+ const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
return dex_file_->GetMethodShorty(method_id, shorty_len);
}
@@ -165,7 +165,7 @@
const DexFile* const dex_file_;
- const DexFile::CodeItem* const code_item_;
+ const dex::CodeItem* const code_item_;
const uint16_t class_def_idx_;
const uint32_t dex_method_idx_;
const uint32_t access_flags_;
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index 80c0a68..d5ceafe 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -135,8 +135,8 @@
ASSERT_EQ(2u, accessor.TriesSize());
ASSERT_NE(0u, accessor.InsnsSizeInCodeUnits());
- const DexFile::TryItem& t0 = accessor.TryItems().begin()[0];
- const DexFile::TryItem& t1 = accessor.TryItems().begin()[1];
+ const dex::TryItem& t0 = accessor.TryItems().begin()[0];
+ const dex::TryItem& t1 = accessor.TryItems().begin()[1];
EXPECT_LE(t0.start_addr_, t1.start_addr_);
{
CatchHandlerIterator iter(accessor, 4 /* Dex PC in the first try block */);
@@ -187,14 +187,14 @@
}
fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc(
- method_g_, kDexPc, /* is_catch_handler */ false)); // return pc
+ method_g_, kDexPc, /* is_for_catch_handler= */ false)); // return pc
// Create/push fake 16byte stack frame for method g
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
fake_stack.push_back(0);
fake_stack.push_back(0);
fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc(
- method_g_, kDexPc, /* is_catch_handler */ false)); // return pc
+ method_g_, kDexPc, /* is_for_catch_handler= */ false)); // return pc
// Create/push fake 16byte stack frame for method f
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_f_));
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 93575d7..0d35fec 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -172,8 +172,8 @@
compiler_driver_.reset(new CompilerDriver(
compiler_options_.get(),
Compiler::kOptimizing,
- /* thread_count */ 1,
- /* swap_fd */ -1));
+ /* thread_count= */ 1,
+ /* swap_fd= */ -1));
// Disable dedupe so we can remove compiled methods.
compiler_driver_->SetDedupeEnabled(false);
}
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 920a3a8..b19a2b8 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -86,7 +86,7 @@
callee_save_regs, mr_conv->EntrySpills());
jni_asm->IncreaseFrameSize(32);
jni_asm->DecreaseFrameSize(32);
- jni_asm->RemoveFrame(frame_size, callee_save_regs, /* may_suspend */ true);
+ jni_asm->RemoveFrame(frame_size, callee_save_regs, /* may_suspend= */ true);
jni_asm->FinalizeCode();
std::vector<uint8_t> actual_asm(jni_asm->CodeSize());
MemoryRegion code(&actual_asm[0], actual_asm.size());
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index bd4304c..3c68389 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -2196,7 +2196,7 @@
// Methods not annotated with anything are not considered "fast native"
// -- Check that the annotation lookup does not find it.
void JniCompilerTest::NormalNativeImpl() {
- SetUpForTest(/* direct */ true,
+ SetUpForTest(/* direct= */ true,
"normalNative",
"()V",
CURRENT_JNI_WRAPPER(Java_MyClassNatives_normalNative));
@@ -2218,7 +2218,7 @@
}
void JniCompilerTest::FastNativeImpl() {
- SetUpForTest(/* direct */ true,
+ SetUpForTest(/* direct= */ true,
"fastNative",
"()V",
CURRENT_JNI_WRAPPER(Java_MyClassNatives_fastNative));
@@ -2241,7 +2241,7 @@
}
void JniCompilerTest::CriticalNativeImpl() {
- SetUpForTest(/* direct */ true,
+ SetUpForTest(/* direct= */ true,
// Important: Don't change the "current jni" yet to avoid a method name suffix.
"criticalNative",
"()V",
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 09376dd..bdbf429 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -151,7 +151,7 @@
// Don't allow both @FastNative and @CriticalNative. They are mutually exclusive.
if (UNLIKELY(is_fast_native && is_critical_native)) {
LOG(FATAL) << "JniCompile: Method cannot be both @CriticalNative and @FastNative"
- << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+ << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
}
// @CriticalNative - extra checks:
@@ -162,15 +162,15 @@
CHECK(is_static)
<< "@CriticalNative functions cannot be virtual since that would"
<< "require passing a reference parameter (this), which is illegal "
- << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+ << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
CHECK(!is_synchronized)
<< "@CriticalNative functions cannot be synchronized since that would"
<< "require passing a (class and/or this) reference parameter, which is illegal "
- << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+ << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
for (size_t i = 0; i < strlen(shorty); ++i) {
CHECK_NE(Primitive::kPrimNot, Primitive::GetType(shorty[i]))
<< "@CriticalNative methods' shorty types must not have illegal references "
- << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+ << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
}
}
}
@@ -632,7 +632,7 @@
__ DecreaseFrameSize(current_out_arg_size);
// 15. Process pending exceptions from JNI call or monitor exit.
- __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0 /* stack_adjust */);
+ __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0 /* stack_adjust= */);
// 16. Remove activation - need to restore callee save registers since the GC may have changed
// them.
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 44f3296..6acce10 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -282,10 +282,10 @@
name,
SHT_STRTAB,
flags,
- /* link */ nullptr,
- /* info */ 0,
+ /* link= */ nullptr,
+ /* info= */ 0,
align,
- /* entsize */ 0) { }
+ /* entsize= */ 0) { }
Elf_Word Add(const std::string& name) {
if (CachedSection::GetCacheSize() == 0u) {
@@ -306,10 +306,10 @@
name,
SHT_STRTAB,
flags,
- /* link */ nullptr,
- /* info */ 0,
+ /* link= */ nullptr,
+ /* info= */ 0,
align,
- /* entsize */ 0) {
+ /* entsize= */ 0) {
Reset();
}
@@ -351,7 +351,7 @@
type,
flags,
strtab,
- /* info */ 1,
+ /* info= */ 1,
sizeof(Elf_Off),
sizeof(Elf_Sym)) {
syms_.push_back(Elf_Sym()); // The symbol table always has to start with NULL symbol.
@@ -768,7 +768,7 @@
// The runtime does not care about the size of this symbol (it uses the "lastword" symbol).
// We use size 0 (meaning "unknown size" in ELF) to prevent overlap with the debug symbols.
Elf_Word oatexec = dynstr_.Add("oatexec");
- dynsym_.Add(oatexec, &text_, text_.GetAddress(), /* size */ 0, STB_GLOBAL, STT_OBJECT);
+ dynsym_.Add(oatexec, &text_, text_.GetAddress(), /* size= */ 0, STB_GLOBAL, STT_OBJECT);
Elf_Word oatlastword = dynstr_.Add("oatlastword");
Elf_Word oatlastword_address = text_.GetAddress() + text_size - 4;
dynsym_.Add(oatlastword, &text_, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT);
@@ -824,7 +824,7 @@
}
if (dex_size != 0u) {
Elf_Word oatdex = dynstr_.Add("oatdex");
- dynsym_.Add(oatdex, &dex_, dex_.GetAddress(), /* size */ 0, STB_GLOBAL, STT_OBJECT);
+ dynsym_.Add(oatdex, &dex_, dex_.GetAddress(), /* size= */ 0, STB_GLOBAL, STT_OBJECT);
Elf_Word oatdexlastword = dynstr_.Add("oatdexlastword");
Elf_Word oatdexlastword_address = dex_.GetAddress() + dex_size - 4;
dynsym_.Add(oatdexlastword, &dex_, oatdexlastword_address, 4, STB_GLOBAL, STT_OBJECT);
diff --git a/compiler/linker/linker_patch.h b/compiler/linker/linker_patch.h
index 5e1615f..f9e3930 100644
--- a/compiler/linker/linker_patch.h
+++ b/compiler/linker/linker_patch.h
@@ -58,7 +58,7 @@
static LinkerPatch IntrinsicReferencePatch(size_t literal_offset,
uint32_t pc_insn_offset,
uint32_t intrinsic_data) {
- LinkerPatch patch(literal_offset, Type::kIntrinsicReference, /* target_dex_file */ nullptr);
+ LinkerPatch patch(literal_offset, Type::kIntrinsicReference, /* target_dex_file= */ nullptr);
patch.intrinsic_data_ = intrinsic_data;
patch.pc_insn_offset_ = pc_insn_offset;
return patch;
@@ -67,7 +67,7 @@
static LinkerPatch DataBimgRelRoPatch(size_t literal_offset,
uint32_t pc_insn_offset,
uint32_t boot_image_offset) {
- LinkerPatch patch(literal_offset, Type::kDataBimgRelRo, /* target_dex_file */ nullptr);
+ LinkerPatch patch(literal_offset, Type::kDataBimgRelRo, /* target_dex_file= */ nullptr);
patch.boot_image_offset_ = boot_image_offset;
patch.pc_insn_offset_ = pc_insn_offset;
return patch;
@@ -144,7 +144,9 @@
static LinkerPatch BakerReadBarrierBranchPatch(size_t literal_offset,
uint32_t custom_value1 = 0u,
uint32_t custom_value2 = 0u) {
- LinkerPatch patch(literal_offset, Type::kBakerReadBarrierBranch, /* target_dex_file */ nullptr);
+ LinkerPatch patch(literal_offset,
+ Type::kBakerReadBarrierBranch,
+ /* target_dex_file= */ nullptr);
patch.baker_custom_value1_ = custom_value1;
patch.baker_custom_value2_ = custom_value2;
return patch;
diff --git a/compiler/optimizing/block_builder.cc b/compiler/optimizing/block_builder.cc
index d9df23f..3672cce 100644
--- a/compiler/optimizing/block_builder.cc
+++ b/compiler/optimizing/block_builder.cc
@@ -68,7 +68,7 @@
// places where the program might fall through into/out of the a block and
// where TryBoundary instructions will be inserted later. Other edges which
// enter/exit the try blocks are a result of branches/switches.
- for (const DexFile::TryItem& try_item : code_item_accessor_.TryItems()) {
+ for (const dex::TryItem& try_item : code_item_accessor_.TryItems()) {
uint32_t dex_pc_start = try_item.start_addr_;
uint32_t dex_pc_end = dex_pc_start + try_item.insn_count_;
MaybeCreateBlockAt(dex_pc_start);
@@ -222,9 +222,9 @@
}
// Returns the TryItem stored for `block` or nullptr if there is no info for it.
-static const DexFile::TryItem* GetTryItem(
+static const dex::TryItem* GetTryItem(
HBasicBlock* block,
- const ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*>& try_block_info) {
+ const ScopedArenaSafeMap<uint32_t, const dex::TryItem*>& try_block_info) {
auto iterator = try_block_info.find(block->GetBlockId());
return (iterator == try_block_info.end()) ? nullptr : iterator->second;
}
@@ -235,7 +235,7 @@
// for a handler.
static void LinkToCatchBlocks(HTryBoundary* try_boundary,
const CodeItemDataAccessor& accessor,
- const DexFile::TryItem* try_item,
+ const dex::TryItem* try_item,
const ScopedArenaSafeMap<uint32_t, HBasicBlock*>& catch_blocks) {
for (CatchHandlerIterator it(accessor.GetCatchHandlerData(try_item->handler_off_));
it.HasNext();
@@ -279,7 +279,7 @@
// Keep a map of all try blocks and their respective TryItems. We do not use
// the block's pointer but rather its id to ensure deterministic iteration.
- ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
+ ScopedArenaSafeMap<uint32_t, const dex::TryItem*> try_block_info(
std::less<uint32_t>(), local_allocator_->Adapter(kArenaAllocGraphBuilder));
// Obtain TryItem information for blocks with throwing instructions, and split
@@ -295,7 +295,7 @@
// loop for synchronized blocks.
if (ContainsElement(throwing_blocks_, block)) {
// Try to find a TryItem covering the block.
- const DexFile::TryItem* try_item = code_item_accessor_.FindTryItem(block->GetDexPc());
+ const dex::TryItem* try_item = code_item_accessor_.FindTryItem(block->GetDexPc());
if (try_item != nullptr) {
// Block throwing and in a TryItem. Store the try block information.
try_block_info.Put(block->GetBlockId(), try_item);
@@ -348,7 +348,7 @@
// that all predecessors are relinked to. This preserves loop headers (b/23895756).
for (const auto& entry : try_block_info) {
uint32_t block_id = entry.first;
- const DexFile::TryItem* try_item = entry.second;
+ const dex::TryItem* try_item = entry.second;
HBasicBlock* try_block = graph_->GetBlocks()[block_id];
for (HBasicBlock* predecessor : try_block->GetPredecessors()) {
if (GetTryItem(predecessor, try_block_info) != try_item) {
@@ -367,7 +367,7 @@
// the successor is not in the same TryItem.
for (const auto& entry : try_block_info) {
uint32_t block_id = entry.first;
- const DexFile::TryItem* try_item = entry.second;
+ const dex::TryItem* try_item = entry.second;
HBasicBlock* try_block = graph_->GetBlocks()[block_id];
// NOTE: Do not use iterators because SplitEdge would invalidate them.
for (size_t i = 0, e = try_block->GetSuccessors().size(); i < e; ++i) {
@@ -415,7 +415,7 @@
// Create blocks.
HBasicBlock* entry_block = new (allocator_) HBasicBlock(graph_, kNoDexPc);
HBasicBlock* exit_block = new (allocator_) HBasicBlock(graph_, kNoDexPc);
- HBasicBlock* body = MaybeCreateBlockAt(/* semantic_dex_pc */ kNoDexPc, /* store_dex_pc */ 0u);
+ HBasicBlock* body = MaybeCreateBlockAt(/* semantic_dex_pc= */ kNoDexPc, /* store_dex_pc= */ 0u);
// Add blocks to the graph.
graph_->AddBlock(entry_block);
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 1c3660c..54a1ae9 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -1634,7 +1634,7 @@
HBasicBlock* block = GetPreHeader(loop, check);
HInstruction* cond =
new (GetGraph()->GetAllocator()) HEqual(array, GetGraph()->GetNullConstant());
- InsertDeoptInLoop(loop, block, cond, /* is_null_check */ true);
+ InsertDeoptInLoop(loop, block, cond, /* is_null_check= */ true);
ReplaceInstruction(check, array);
return true;
}
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index e15161e..5927d68 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -43,7 +43,7 @@
void RunBCE() {
graph_->BuildDominatorTree();
- InstructionSimplifier(graph_, /* codegen */ nullptr).Run();
+ InstructionSimplifier(graph_, /* codegen= */ nullptr).Run();
SideEffectsAnalysis side_effects(graph_);
side_effects.Run();
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 2184f99..9e2f5cd 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -414,7 +414,7 @@
// This ensures that we have correct native line mapping for all native instructions.
// It is necessary to make stepping over a statement work. Otherwise, any initial
// instructions (e.g. moves) would be assumed to be the start of next statement.
- MaybeRecordNativeDebugInfo(nullptr /* instruction */, block->GetDexPc());
+ MaybeRecordNativeDebugInfo(/* instruction= */ nullptr, block->GetDexPc());
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
if (current->HasEnvironment()) {
@@ -987,7 +987,7 @@
// dex branch instructions.
static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
const CodeInfo& code_info,
- const DexFile::CodeItem& code_item) {
+ const dex::CodeItem& code_item) {
if (graph.HasTryCatch()) {
// One can write loops through try/catch, which we do not support for OSR anyway.
return;
@@ -1029,7 +1029,7 @@
}
}
-ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const DexFile::CodeItem* code_item) {
+ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const dex::CodeItem* code_item) {
ScopedArenaVector<uint8_t> stack_map = GetStackMapStream()->Encode();
if (kIsDebugBuild && code_item != nullptr) {
CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item);
@@ -1085,7 +1085,7 @@
// call). Therefore register_mask contains both callee-save and caller-save
// registers that hold objects. We must remove the spilled caller-save from the
// mask, since they will be overwritten by the callee.
- uint32_t spills = GetSlowPathSpills(locations, /* core_registers */ true);
+ uint32_t spills = GetSlowPathSpills(locations, /* core_registers= */ true);
register_mask &= ~spills;
} else {
// The register mask must be a subset of callee-save registers.
@@ -1164,7 +1164,7 @@
// Ensure that we do not collide with the stack map of the previous instruction.
GenerateNop();
}
- RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info */ true);
+ RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info= */ true);
}
}
@@ -1182,8 +1182,8 @@
stack_map_stream->BeginStackMapEntry(dex_pc,
native_pc,
- /* register_mask */ 0,
- /* stack_mask */ nullptr,
+ /* register_mask= */ 0,
+ /* sp_mask= */ nullptr,
StackMap::Kind::Catch);
HInstruction* current_phi = block->GetFirstPhi();
@@ -1555,7 +1555,7 @@
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
// If the register holds an object, update the stack mask.
if (locations->RegisterContainsObject(i)) {
@@ -1567,7 +1567,7 @@
stack_offset += codegen->SaveCoreRegister(stack_offset, i);
}
- const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -1579,14 +1579,14 @@
void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
}
- const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 39966ff..f70ecb6 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -349,7 +349,7 @@
void AddSlowPath(SlowPathCode* slow_path);
- ScopedArenaVector<uint8_t> BuildStackMaps(const DexFile::CodeItem* code_item_for_osr_check);
+ ScopedArenaVector<uint8_t> BuildStackMaps(const dex::CodeItem* code_item_for_osr_check);
size_t GetNumberOfJitRoots() const;
// Fills the `literals` array with literals collected during code generation.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 9e2fd9e..ff99a3e 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -168,8 +168,8 @@
LocationSummary* locations,
int64_t spill_offset,
bool is_save) {
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
- const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spills,
codegen->GetNumberOfCoreRegisters(),
fp_spills,
@@ -212,7 +212,7 @@
void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
// If the register holds an object, update the stack mask.
if (locations->RegisterContainsObject(i)) {
@@ -224,7 +224,7 @@
stack_offset += kXRegSizeInBytes;
}
- const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -234,13 +234,13 @@
SaveRestoreLiveRegistersHelper(codegen,
locations,
- codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */);
+ codegen->GetFirstRegisterSlotInSlowPath(), /* is_save= */ true);
}
void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
SaveRestoreLiveRegistersHelper(codegen,
locations,
- codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */);
+ codegen->GetFirstRegisterSlotInSlowPath(), /* is_save= */ false);
}
class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
@@ -926,7 +926,7 @@
uint32_t encoded_data = entry.first;
vixl::aarch64::Label* slow_path_entry = &entry.second.label;
__ Bind(slow_path_entry);
- CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
+ CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name= */ nullptr);
}
// Ensure we emit the literal pool.
@@ -1118,7 +1118,7 @@
}
}
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void CodeGeneratorARM64::GenerateFrameExit() {
@@ -1888,7 +1888,7 @@
base,
offset,
maybe_temp,
- /* needs_null_check */ true,
+ /* needs_null_check= */ true,
field_info.IsVolatile());
} else {
// General case.
@@ -1897,7 +1897,7 @@
// CodeGeneratorARM64::LoadAcquire call.
// NB: LoadAcquire will record the pc info if needed.
codegen_->LoadAcquire(
- instruction, OutputCPURegister(instruction), field, /* needs_null_check */ true);
+ instruction, OutputCPURegister(instruction), field, /* needs_null_check= */ true);
} else {
// Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
@@ -1952,7 +1952,7 @@
if (field_info.IsVolatile()) {
codegen_->StoreRelease(
- instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check */ true);
+ instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check= */ true);
} else {
// Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
@@ -2376,11 +2376,11 @@
obj.W(),
offset,
maybe_temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
} else {
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- instruction, out, obj.W(), offset, index, /* needs_null_check */ false);
+ instruction, out, obj.W(), offset, index, /* needs_null_check= */ false);
}
} else {
// General case.
@@ -2925,7 +2925,7 @@
int64_t magic;
int shift;
CalculateMagicAndShiftForDivRem(
- imm, type == DataType::Type::kInt64 /* is_long */, &magic, &shift);
+ imm, /* is_long= */ type == DataType::Type::kInt64, &magic, &shift);
UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp = temps.AcquireSameSizeAs(out);
@@ -3116,7 +3116,7 @@
}
if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
if (!codegen_->GoesToNextBlock(block, successor)) {
__ B(codegen_->GetLabelOf(successor));
@@ -3266,7 +3266,7 @@
if (codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor)) {
false_target = nullptr;
}
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -3285,9 +3285,9 @@
SlowPathCodeARM64* slow_path =
deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM64>(deoptimize);
GenerateTestAndBranch(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
void LocationsBuilderARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -3627,7 +3627,7 @@
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -3659,7 +3659,7 @@
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -3952,7 +3952,7 @@
void InstructionCodeGeneratorARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
@@ -4022,7 +4022,7 @@
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -4201,7 +4201,7 @@
void InstructionCodeGeneratorARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
codegen_->GenerateInvokePolymorphicCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitInvokeCustom(HInvokeCustom* invoke) {
@@ -4210,21 +4210,21 @@
void InstructionCodeGeneratorARM64::VisitInvokeCustom(HInvokeCustom* invoke) {
codegen_->GenerateInvokeCustomCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageIntrinsicPatch(
uint32_t intrinsic_data,
vixl::aarch64::Label* adrp_label) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, intrinsic_data, adrp_label, &boot_image_intrinsic_patches_);
+ /* dex_file= */ nullptr, intrinsic_data, adrp_label, &boot_image_intrinsic_patches_);
}
vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageRelRoPatch(
uint32_t boot_image_offset,
vixl::aarch64::Label* adrp_label) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_);
+ /* dex_file= */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_);
}
vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageMethodPatch(
@@ -4308,7 +4308,7 @@
ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
- [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
+ [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u); });
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral(
@@ -4316,7 +4316,7 @@
ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
- [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
+ [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u); });
}
void CodeGeneratorARM64::EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label,
@@ -4513,7 +4513,7 @@
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
return;
}
@@ -4526,12 +4526,12 @@
invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
return;
}
@@ -4543,7 +4543,7 @@
DCHECK(!codegen_->IsLeafMethod());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
@@ -4611,7 +4611,7 @@
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
return;
}
DCHECK(!cls->NeedsAccessCheck());
@@ -4633,7 +4633,7 @@
out_loc,
current_method,
ArtMethod::DeclaringClassOffset().Int32Value(),
- /* fixup_label */ nullptr,
+ /* fixup_label= */ nullptr,
read_barrier_option);
break;
}
@@ -4696,8 +4696,8 @@
codegen_->GenerateGcRootFieldLoad(cls,
out_loc,
out.X(),
- /* offset */ 0,
- /* fixup_label */ nullptr,
+ /* offset= */ 0,
+ /* fixup_label= */ nullptr,
read_barrier_option);
break;
}
@@ -4721,7 +4721,7 @@
} else {
__ Bind(slow_path->GetExitLabel());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
}
@@ -4859,7 +4859,7 @@
codegen_->AddSlowPath(slow_path);
__ Cbz(out.X(), slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
return;
}
case HLoadString::LoadKind::kJitBootImageAddress: {
@@ -4875,8 +4875,8 @@
codegen_->GenerateGcRootFieldLoad(load,
out_loc,
out.X(),
- /* offset */ 0,
- /* fixup_label */ nullptr,
+ /* offset= */ 0,
+ /* fixup_label= */ nullptr,
kCompilerReadBarrierOption);
return;
}
@@ -4890,7 +4890,7 @@
__ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex().index_);
codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
@@ -4918,7 +4918,7 @@
} else {
CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitMul(HMul* mul) {
@@ -5013,7 +5013,7 @@
QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -5027,7 +5027,7 @@
void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitNot(HNot* instruction) {
@@ -5502,7 +5502,7 @@
return;
}
GenerateSuspendCheck(instruction, nullptr);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
@@ -5715,8 +5715,8 @@
out_reg,
offset,
maybe_temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -5756,8 +5756,8 @@
obj_reg,
offset,
maybe_temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -5842,7 +5842,7 @@
// Note that GC roots are not affected by heap poisoning, thus we
// do not have to unpoison `root_reg` here.
}
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void CodeGeneratorARM64::GenerateUnsafeCasOldValueMovWithBakerReadBarrier(
@@ -5931,7 +5931,7 @@
}
__ bind(&return_address);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
+ MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__, /* temp_loc= */ LocationFrom(ip1));
}
void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -6039,7 +6039,7 @@
}
__ bind(&return_address);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
+ MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__, /* temp_loc= */ LocationFrom(ip1));
}
void CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index dad1813..8204f1e 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -319,7 +319,7 @@
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
size_t orig_offset = stack_offset;
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
// If the register holds an object, update the stack mask.
if (locations->RegisterContainsObject(i)) {
@@ -334,7 +334,7 @@
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
arm_codegen->GetAssembler()->StoreRegisterList(core_spills, orig_offset);
- uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
orig_offset = stack_offset;
for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -357,7 +357,7 @@
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
size_t orig_offset = stack_offset;
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -368,7 +368,7 @@
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
arm_codegen->GetAssembler()->LoadRegisterList(core_spills, orig_offset);
- uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
while (fp_spills != 0u) {
uint32_t begin = CTZ(fp_spills);
uint32_t tmp = fp_spills + (1u << begin);
@@ -1539,7 +1539,7 @@
vixl32::Label done_label;
vixl32::Label* const final_label = codegen->GetFinalLabel(cond, &done_label);
- __ B(condition.second, final_label, /* far_target */ false);
+ __ B(condition.second, final_label, /* is_far_target= */ false);
__ Mov(out, 1);
if (done_label.IsReferenced()) {
@@ -1934,7 +1934,7 @@
uint32_t encoded_data = entry.first;
vixl::aarch32::Label* slow_path_entry = &entry.second.label;
__ Bind(slow_path_entry);
- CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
+ CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name= */ nullptr);
}
GetAssembler()->FinalizeCode();
@@ -2159,7 +2159,7 @@
GetAssembler()->StoreToOffset(kStoreWord, temp, sp, GetStackOffsetOfShouldDeoptimizeFlag());
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 1);
+ MaybeGenerateMarkingRegisterCheck(/* code= */ 1);
}
void CodeGeneratorARMVIXL::GenerateFrameExit() {
@@ -2427,7 +2427,7 @@
}
if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 2);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 2);
}
if (!codegen_->GoesToNextBlock(block, successor)) {
__ B(codegen_->GetLabelOf(successor));
@@ -2606,7 +2606,7 @@
nullptr : codegen_->GetLabelOf(true_successor);
vixl32::Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -2625,9 +2625,9 @@
SlowPathCodeARMVIXL* slow_path =
deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARMVIXL>(deoptimize);
GenerateTestAndBranch(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
void LocationsBuilderARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -2793,7 +2793,7 @@
}
}
- GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target */ false);
+ GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target= */ false);
codegen_->MoveLocation(out, src, type);
if (output_overlaps_with_condition_inputs) {
__ B(target);
@@ -3135,7 +3135,7 @@
void InstructionCodeGeneratorARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 3);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 3);
}
void LocationsBuilderARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
@@ -3166,7 +3166,7 @@
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 4);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 4);
return;
}
@@ -3174,7 +3174,7 @@
codegen_->GenerateStaticOrDirectCall(
invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 5);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 5);
}
void LocationsBuilderARMVIXL::HandleInvoke(HInvoke* invoke) {
@@ -3193,14 +3193,14 @@
void InstructionCodeGeneratorARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke) {
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 6);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 6);
return;
}
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 7);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 7);
}
void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
@@ -3278,7 +3278,7 @@
DCHECK(!codegen_->IsLeafMethod());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 8);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 8);
}
void LocationsBuilderARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
@@ -3287,7 +3287,7 @@
void InstructionCodeGeneratorARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
codegen_->GenerateInvokePolymorphicCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 9);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 9);
}
void LocationsBuilderARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) {
@@ -3296,7 +3296,7 @@
void InstructionCodeGeneratorARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) {
codegen_->GenerateInvokeCustomCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 10);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 10);
}
void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) {
@@ -4013,7 +4013,7 @@
int64_t magic;
int shift;
- CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
// TODO(VIXL): Change the static cast to Operand::From() after VIXL is fixed.
__ Mov(temp1, static_cast<int32_t>(magic));
@@ -4421,7 +4421,7 @@
__ Vcmp(op1, op2);
__ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
- __ B(vs, &nan, /* far_target */ false); // if un-ordered, go to NaN handling.
+ __ B(vs, &nan, /* is_far_target= */ false); // if un-ordered, go to NaN handling.
// op1 <> op2
vixl32::ConditionType cond = is_min ? gt : lt;
@@ -4433,7 +4433,7 @@
__ vmov(cond, F32, out, op2);
}
// for <>(not equal), we've done min/max calculation.
- __ B(ne, final_label, /* far_target */ false);
+ __ B(ne, final_label, /* is_far_target= */ false);
// handle op1 == op2, max(+0.0,-0.0), min(+0.0,-0.0).
__ Vmov(temp1, op1);
@@ -4478,7 +4478,7 @@
__ Vcmp(op1, op2);
__ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
- __ B(vs, &handle_nan_eq, /* far_target */ false); // if un-ordered, go to NaN handling.
+ __ B(vs, &handle_nan_eq, /* is_far_target= */ false); // if un-ordered, go to NaN handling.
// op1 <> op2
vixl32::ConditionType cond = is_min ? gt : lt;
@@ -4490,7 +4490,7 @@
__ vmov(cond, F64, out, op2);
}
// for <>(not equal), we've done min/max calculation.
- __ B(ne, final_label, /* far_target */ false);
+ __ B(ne, final_label, /* is_far_target= */ false);
// handle op1 == op2, max(+0.0,-0.0).
if (!is_min) {
@@ -4714,7 +4714,7 @@
__ And(shift_right, RegisterFrom(rhs), 0x1F);
__ Lsrs(shift_left, RegisterFrom(rhs), 6);
__ Rsb(LeaveFlags, shift_left, shift_right, Operand::From(kArmBitsPerWord));
- __ B(cc, &shift_by_32_plus_shift_right, /* far_target */ false);
+ __ B(cc, &shift_by_32_plus_shift_right, /* is_far_target= */ false);
// out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right).
// out_reg_lo = (reg_lo << shift_left) | (reg_hi >> shift_right).
@@ -5030,7 +5030,7 @@
void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction) {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 11);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 11);
}
void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
@@ -5048,7 +5048,7 @@
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 12);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 12);
}
void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
@@ -5170,8 +5170,8 @@
}
case DataType::Type::kInt64: {
__ Cmp(HighRegisterFrom(left), HighRegisterFrom(right)); // Signed compare.
- __ B(lt, &less, /* far_target */ false);
- __ B(gt, &greater, /* far_target */ false);
+ __ B(lt, &less, /* is_far_target= */ false);
+ __ B(gt, &greater, /* is_far_target= */ false);
// Emit move to `out` before the last `Cmp`, as `Mov` might affect the status flags.
__ Mov(out, 0);
__ Cmp(LowRegisterFrom(left), LowRegisterFrom(right)); // Unsigned compare.
@@ -5192,8 +5192,8 @@
UNREACHABLE();
}
- __ B(eq, final_label, /* far_target */ false);
- __ B(less_cond, &less, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
+ __ B(less_cond, &less, /* is_far_target= */ false);
__ Bind(&greater);
__ Mov(out, 1);
@@ -5608,7 +5608,7 @@
// Note that a potential implicit null check is handled in this
// CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, base, offset, maybe_temp, /* needs_null_check */ true);
+ instruction, out, base, offset, maybe_temp, /* needs_null_check= */ true);
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -5964,7 +5964,7 @@
__ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not.
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
- __ B(cs, &uncompressed_load, /* far_target */ false);
+ __ B(cs, &uncompressed_load, /* is_far_target= */ false);
GetAssembler()->LoadFromOffset(kLoadUnsignedByte,
RegisterFrom(out_loc),
obj,
@@ -6006,7 +6006,7 @@
__ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not.
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
- __ B(cs, &uncompressed_load, /* far_target */ false);
+ __ B(cs, &uncompressed_load, /* is_far_target= */ false);
__ Ldrb(RegisterFrom(out_loc), MemOperand(temp, RegisterFrom(index), vixl32::LSL, 0));
__ B(final_label);
__ Bind(&uncompressed_load);
@@ -6046,11 +6046,11 @@
obj,
data_offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
Location temp = locations->GetTemp(0);
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- out_loc, obj, data_offset, index, temp, /* needs_null_check */ false);
+ out_loc, obj, data_offset, index, temp, /* needs_null_check= */ false);
}
} else {
vixl32::Register out = OutputRegister(instruction);
@@ -6325,7 +6325,7 @@
if (instruction->StaticTypeOfArrayIsObjectArray()) {
vixl32::Label do_put;
- __ B(eq, &do_put, /* far_target */ false);
+ __ B(eq, &do_put, /* is_far_target= */ false);
// If heap poisoning is enabled, the `temp1` reference has
// not been unpoisoned yet; unpoison it now.
GetAssembler()->MaybeUnpoisonHeapReference(temp1);
@@ -6627,7 +6627,7 @@
return;
}
GenerateSuspendCheck(instruction, nullptr);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 13);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 13);
}
void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instruction,
@@ -6975,7 +6975,7 @@
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 14);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 14);
return;
}
DCHECK(!cls->NeedsAccessCheck());
@@ -7014,14 +7014,14 @@
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(cls));
codegen_->EmitMovwMovtPlaceholder(labels, out);
- __ Ldr(out, MemOperand(out, /* offset */ 0));
+ __ Ldr(out, MemOperand(out, /* offset= */ 0));
break;
}
case HLoadClass::LoadKind::kBssEntry: {
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
codegen_->EmitMovwMovtPlaceholder(labels, out);
- codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
+ codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option);
generate_null_check = true;
break;
}
@@ -7037,7 +7037,7 @@
cls->GetTypeIndex(),
cls->GetClass()));
// /* GcRoot<mirror::Class> */ out = *out
- codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
+ codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option);
break;
}
case HLoadClass::LoadKind::kRuntimeCall:
@@ -7059,7 +7059,7 @@
} else {
__ Bind(slow_path->GetExitLabel());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 15);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 15);
}
}
@@ -7240,7 +7240,7 @@
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(load));
codegen_->EmitMovwMovtPlaceholder(labels, out);
- __ Ldr(out, MemOperand(out, /* offset */ 0));
+ __ Ldr(out, MemOperand(out, /* offset= */ 0));
return;
}
case HLoadString::LoadKind::kBssEntry: {
@@ -7249,13 +7249,13 @@
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitMovwMovtPlaceholder(labels, out);
codegen_->GenerateGcRootFieldLoad(
- load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+ load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 16);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 16);
return;
}
case HLoadString::LoadKind::kJitBootImageAddress: {
@@ -7270,7 +7270,7 @@
load->GetString()));
// /* GcRoot<mirror::String> */ out = *out
codegen_->GenerateGcRootFieldLoad(
- load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+ load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption);
return;
}
default:
@@ -7283,7 +7283,7 @@
__ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 17);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 17);
}
static int32_t GetExceptionTlsOffset() {
@@ -7415,7 +7415,7 @@
if (instruction->MustDoNullCheck()) {
DCHECK(!out.Is(obj));
__ Mov(out, 0);
- __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(obj, final_label, /* is_far_target= */ false);
}
switch (type_check_kind) {
@@ -7447,7 +7447,7 @@
__ it(eq);
__ mov(eq, out, 1);
} else {
- __ B(ne, final_label, /* far_target */ false);
+ __ B(ne, final_label, /* is_far_target= */ false);
__ Mov(out, 1);
}
@@ -7475,9 +7475,9 @@
maybe_temp_loc,
read_barrier_option);
// If `out` is null, we use it for the result, and jump to the final label.
- __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
__ Cmp(out, cls);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
__ Mov(out, 1);
break;
}
@@ -7496,7 +7496,7 @@
vixl32::Label loop, success;
__ Bind(&loop);
__ Cmp(out, cls);
- __ B(eq, &success, /* far_target */ false);
+ __ B(eq, &success, /* is_far_target= */ false);
// /* HeapReference<Class> */ out = out->super_class_
GenerateReferenceLoadOneRegister(instruction,
out_loc,
@@ -7506,7 +7506,7 @@
// This is essentially a null check, but it sets the condition flags to the
// proper value for the code that follows the loop, i.e. not `eq`.
__ Cmp(out, 1);
- __ B(hs, &loop, /* far_target */ false);
+ __ B(hs, &loop, /* is_far_target= */ false);
// Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
// we check that the output is in a low register, so that a 16-bit MOV
@@ -7551,7 +7551,7 @@
// Do an exact check.
vixl32::Label exact_check;
__ Cmp(out, cls);
- __ B(eq, &exact_check, /* far_target */ false);
+ __ B(eq, &exact_check, /* is_far_target= */ false);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ out = out->component_type_
GenerateReferenceLoadOneRegister(instruction,
@@ -7560,7 +7560,7 @@
maybe_temp_loc,
read_barrier_option);
// If `out` is null, we use it for the result, and jump to the final label.
- __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
__ Cmp(out, 0);
@@ -7582,7 +7582,7 @@
__ it(eq);
__ mov(eq, out, 1);
} else {
- __ B(ne, final_label, /* far_target */ false);
+ __ B(ne, final_label, /* is_far_target= */ false);
__ Bind(&exact_check);
__ Mov(out, 1);
}
@@ -7602,7 +7602,7 @@
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -7631,7 +7631,7 @@
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7716,7 +7716,7 @@
vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done);
// Avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
- __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(obj, final_label, /* is_far_target= */ false);
}
switch (type_check_kind) {
@@ -7763,7 +7763,7 @@
// Otherwise, compare the classes.
__ Cmp(temp, cls);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
break;
}
@@ -7780,7 +7780,7 @@
vixl32::Label loop;
__ Bind(&loop);
__ Cmp(temp, cls);
- __ B(eq, final_label, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction,
@@ -7808,7 +7808,7 @@
// Do an exact check.
__ Cmp(temp, cls);
- __ B(eq, final_label, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ temp = temp->component_type_
@@ -7872,7 +7872,7 @@
__ Sub(RegisterFrom(maybe_temp2_loc), RegisterFrom(maybe_temp2_loc), 2);
// Compare the classes and continue the loop if they do not match.
__ Cmp(cls, RegisterFrom(maybe_temp3_loc));
- __ B(ne, &start_loop, /* far_target */ false);
+ __ B(ne, &start_loop, /* is_far_target= */ false);
break;
}
@@ -7913,7 +7913,7 @@
} else {
CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 18);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 18);
}
void LocationsBuilderARMVIXL::VisitAnd(HAnd* instruction) {
@@ -8268,7 +8268,7 @@
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, out_reg, offset, maybe_temp, /* needs_null_check */ false);
+ instruction, out, out_reg, offset, maybe_temp, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -8303,7 +8303,7 @@
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, obj_reg, offset, maybe_temp, /* needs_null_check */ false);
+ instruction, out, obj_reg, offset, maybe_temp, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -8384,7 +8384,7 @@
// Note that GC roots are not affected by heap poisoning, thus we
// do not have to unpoison `root_reg` here.
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 19);
+ MaybeGenerateMarkingRegisterCheck(/* code= */ 19);
}
void CodeGeneratorARMVIXL::GenerateUnsafeCasOldValueAddWithBakerReadBarrier(
@@ -8484,7 +8484,7 @@
narrow ? BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET
: BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 20, /* temp_loc */ LocationFrom(ip));
+ MaybeGenerateMarkingRegisterCheck(/* code= */ 20, /* temp_loc= */ LocationFrom(ip));
}
void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -8572,7 +8572,7 @@
DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 21, /* temp_loc */ LocationFrom(ip));
+ MaybeGenerateMarkingRegisterCheck(/* code= */ 21, /* temp_loc= */ LocationFrom(ip));
}
void CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
@@ -8815,12 +8815,12 @@
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageIntrinsicPatch(
uint32_t intrinsic_data) {
- return NewPcRelativePatch(/* dex_file */ nullptr, intrinsic_data, &boot_image_intrinsic_patches_);
+ return NewPcRelativePatch(/* dex_file= */ nullptr, intrinsic_data, &boot_image_intrinsic_patches_);
}
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageRelRoPatch(
uint32_t boot_image_offset) {
- return NewPcRelativePatch(/* dex_file */ nullptr,
+ return NewPcRelativePatch(/* dex_file= */ nullptr,
boot_image_offset,
&boot_image_method_patches_);
}
@@ -8891,7 +8891,7 @@
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() {
- return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u);
+ return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u);
});
}
@@ -8902,7 +8902,7 @@
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() {
- return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u);
+ return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u);
});
}
@@ -8916,7 +8916,7 @@
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
NewBootImageRelRoPatch(boot_image_reference);
EmitMovwMovtPlaceholder(labels, reg);
- __ Ldr(reg, MemOperand(reg, /* offset */ 0));
+ __ Ldr(reg, MemOperand(reg, /* offset= */ 0));
} else {
DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -9061,7 +9061,7 @@
return map->GetOrCreate(
value,
[this, value]() {
- return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ value);
+ return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ value);
});
}
@@ -9288,9 +9288,9 @@
CodeBufferCheckScope::kMaximumSize);
// TODO(VIXL): Think about using mov instead of movw.
__ bind(&labels->movw_label);
- __ movw(out, /* placeholder */ 0u);
+ __ movw(out, /* operand= */ 0u);
__ bind(&labels->movt_label);
- __ movt(out, /* placeholder */ 0u);
+ __ movt(out, /* operand= */ 0u);
__ bind(&labels->add_pc_label);
__ add(out, out, pc);
}
@@ -9313,7 +9313,7 @@
static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
__ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted));
- __ B(ne, slow_path, /* is_far_target */ false);
+ __ B(ne, slow_path, /* is_far_target= */ false);
// To throw NPE, we return to the fast path; the artificial dependence below does not matter.
if (throw_npe != nullptr) {
__ Bind(throw_npe);
@@ -9360,7 +9360,7 @@
vixl32::Label* throw_npe = nullptr;
if (GetCompilerOptions().GetImplicitNullChecks() && holder_reg.Is(base_reg)) {
throw_npe = &throw_npe_label;
- __ CompareAndBranchIfZero(holder_reg, throw_npe, /* is_far_target */ false);
+ __ CompareAndBranchIfZero(holder_reg, throw_npe, /* is_far_target= */ false);
}
// Check if the holder is gray and, if not, add fake dependency to the base register
// and return to the LDR instruction to load the reference. Otherwise, use introspection
@@ -9437,7 +9437,7 @@
UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
temps.Exclude(ip);
vixl32::Label return_label, not_marked, forwarding_address;
- __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target */ false);
+ __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target= */ false);
MemOperand lock_word(root_reg, mirror::Object::MonitorOffset().Int32Value());
__ Ldr(ip, lock_word);
__ Tst(ip, LockWord::kMarkBitStateMaskShifted);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index c536dd3..f7f37db 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -587,7 +587,7 @@
mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
instruction_,
this,
- /* direct */ false);
+ /* direct= */ false);
}
__ B(GetExitLabel());
}
@@ -681,7 +681,7 @@
mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
instruction_,
this,
- /* direct */ false);
+ /* direct= */ false);
// If the new reference is different from the old reference,
// update the field in the holder (`*(obj_ + field_offset_)`).
@@ -1167,9 +1167,9 @@
__ Move(r2_l, TMP);
__ Move(r2_h, AT);
} else if (loc1.IsStackSlot() && loc2.IsStackSlot()) {
- Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ false);
+ Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot= */ false);
} else if (loc1.IsDoubleStackSlot() && loc2.IsDoubleStackSlot()) {
- Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ true);
+ Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot= */ true);
} else if (loc1.IsSIMDStackSlot() && loc2.IsSIMDStackSlot()) {
ExchangeQuadSlots(loc1.GetStackIndex(), loc2.GetStackIndex());
} else if ((loc1.IsRegister() && loc2.IsStackSlot()) ||
@@ -1654,14 +1654,14 @@
uint32_t intrinsic_data,
const PcRelativePatchInfo* info_high) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
+ /* dex_file= */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
}
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageRelRoPatch(
uint32_t boot_image_offset,
const PcRelativePatchInfo* info_high) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
+ /* dex_file= */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
}
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageMethodPatch(
@@ -1737,7 +1737,7 @@
__ Bind(&info_high->label);
__ Bind(&info_high->pc_rel_label);
// Add the high half of a 32-bit offset to PC.
- __ Auipc(out, /* placeholder */ 0x1234);
+ __ Auipc(out, /* imm16= */ 0x1234);
__ SetReorder(reordering);
} else {
// If base is ZERO, emit NAL to obtain the actual base.
@@ -1746,7 +1746,7 @@
__ Nal();
}
__ Bind(&info_high->label);
- __ Lui(out, /* placeholder */ 0x1234);
+ __ Lui(out, /* imm16= */ 0x1234);
// If we emitted the NAL, bind the pc_rel_label, otherwise base is a register holding
// the HMipsComputeBaseMethodAddress which has its own label stored in MipsAssembler.
if (base == ZERO) {
@@ -1764,13 +1764,13 @@
if (GetCompilerOptions().IsBootImage()) {
PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
- EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base */ ZERO);
- __ Addiu(reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base= */ ZERO);
+ __ Addiu(reg, TMP, /* imm16= */ 0x5678, &info_low->label);
} else if (GetCompilerOptions().GetCompilePic()) {
PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
- EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base */ ZERO);
- __ Lw(reg, reg, /* placeholder */ 0x5678, &info_low->label);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base= */ ZERO);
+ __ Lw(reg, reg, /* imm16= */ 0x5678, &info_low->label);
} else {
DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1793,8 +1793,8 @@
PcRelativePatchInfo* info_high = NewBootImageTypePatch(*target_method.dex_file, type_idx);
PcRelativePatchInfo* info_low =
NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high);
- EmitPcRelativeAddressPlaceholderHigh(info_high, argument, /* base */ ZERO);
- __ Addiu(argument, argument, /* placeholder */ 0x5678, &info_low->label);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, argument, /* base= */ ZERO);
+ __ Addiu(argument, argument, /* imm16= */ 0x5678, &info_low->label);
} else {
LoadBootImageAddress(argument, boot_image_offset);
}
@@ -2579,7 +2579,7 @@
__ Or(dst_high, dst_high, TMP);
__ Andi(TMP, rhs_reg, kMipsBitsPerWord);
if (isR6) {
- __ Beqzc(TMP, &done, /* is_bare */ true);
+ __ Beqzc(TMP, &done, /* is_bare= */ true);
__ Move(dst_high, dst_low);
__ Move(dst_low, ZERO);
} else {
@@ -2595,7 +2595,7 @@
__ Or(dst_low, dst_low, TMP);
__ Andi(TMP, rhs_reg, kMipsBitsPerWord);
if (isR6) {
- __ Beqzc(TMP, &done, /* is_bare */ true);
+ __ Beqzc(TMP, &done, /* is_bare= */ true);
__ Move(dst_low, dst_high);
__ Sra(dst_high, dst_high, 31);
} else {
@@ -2612,7 +2612,7 @@
__ Or(dst_low, dst_low, TMP);
__ Andi(TMP, rhs_reg, kMipsBitsPerWord);
if (isR6) {
- __ Beqzc(TMP, &done, /* is_bare */ true);
+ __ Beqzc(TMP, &done, /* is_bare= */ true);
__ Move(dst_low, dst_high);
__ Move(dst_high, ZERO);
} else {
@@ -2631,7 +2631,7 @@
__ Or(dst_high, dst_high, TMP);
__ Andi(TMP, rhs_reg, kMipsBitsPerWord);
if (isR6) {
- __ Beqzc(TMP, &done, /* is_bare */ true);
+ __ Beqzc(TMP, &done, /* is_bare= */ true);
__ Move(TMP, dst_high);
__ Move(dst_high, dst_low);
__ Move(dst_low, TMP);
@@ -2862,7 +2862,7 @@
obj,
offset,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
out_loc,
@@ -2870,7 +2870,7 @@
data_offset,
index,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
}
} else {
Register out = out_loc.AsRegister<Register>();
@@ -4104,7 +4104,7 @@
int64_t magic;
int shift;
- CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, false /* is_long= */, &magic, &shift);
bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
@@ -5948,7 +5948,7 @@
nullptr : codegen_->GetLabelOf(true_successor);
MipsLabel* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -5967,9 +5967,9 @@
SlowPathCodeMIPS* slow_path =
deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS>(deoptimize);
GenerateTestAndBranch(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
// This function returns true if a conditional move can be generated for HSelect.
@@ -5983,7 +5983,7 @@
// of common logic.
static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* locations_to_set) {
bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
HCondition* condition = cond->AsCondition();
DataType::Type cond_type =
@@ -6216,7 +6216,7 @@
Location src = locations->InAt(1);
Register src_reg = ZERO;
Register src_reg_high = ZERO;
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
Register cond_reg = TMP;
int cond_cc = 0;
DataType::Type cond_type = DataType::Type::kInt32;
@@ -6224,7 +6224,7 @@
DataType::Type dst_type = select->GetType();
if (IsBooleanValueOrMaterializedCondition(cond)) {
- cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
+ cond_reg = locations->InAt(/* at= */ 2).AsRegister<Register>();
} else {
HCondition* condition = cond->AsCondition();
LocationSummary* cond_locations = cond->GetLocations();
@@ -6337,7 +6337,7 @@
Location dst = locations->Out();
Location false_src = locations->InAt(0);
Location true_src = locations->InAt(1);
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
Register cond_reg = TMP;
FRegister fcond_reg = FTMP;
DataType::Type cond_type = DataType::Type::kInt32;
@@ -6345,7 +6345,7 @@
DataType::Type dst_type = select->GetType();
if (IsBooleanValueOrMaterializedCondition(cond)) {
- cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
+ cond_reg = locations->InAt(/* at= */ 2).AsRegister<Register>();
} else {
HCondition* condition = cond->AsCondition();
LocationSummary* cond_locations = cond->GetLocations();
@@ -6526,7 +6526,7 @@
void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) {
bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
- if (CanMoveConditionally(select, is_r6, /* locations_to_set */ nullptr)) {
+ if (CanMoveConditionally(select, is_r6, /* locations_to_set= */ nullptr)) {
if (is_r6) {
GenConditionalMoveR6(select);
} else {
@@ -6536,8 +6536,8 @@
LocationSummary* locations = select->GetLocations();
MipsLabel false_target;
GenerateTestAndBranch(select,
- /* condition_input_index */ 2,
- /* true_target */ nullptr,
+ /* condition_input_index= */ 2,
+ /* true_target= */ nullptr,
&false_target);
codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
__ Bind(&false_target);
@@ -6696,7 +6696,7 @@
obj,
offset,
temp_loc,
- /* needs_null_check */ true);
+ /* needs_null_check= */ true);
if (is_volatile) {
GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -6929,7 +6929,7 @@
out_reg,
offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -6970,7 +6970,7 @@
obj_reg,
offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -7061,7 +7061,7 @@
__ AddUpper(base, obj, offset_high);
}
MipsLabel skip_call;
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
if (label_low != nullptr) {
DCHECK(short_offset);
__ Bind(label_low);
@@ -7216,11 +7216,11 @@
MipsLabel skip_call;
if (short_offset) {
if (isR6) {
- __ Beqzc(T9, &skip_call, /* is_bare */ true);
+ __ Beqzc(T9, &skip_call, /* is_bare= */ true);
__ Nop(); // In forbidden slot.
__ Jialc(T9, thunk_disp);
} else {
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Addiu(T9, T9, thunk_disp); // In delay slot.
__ Jalr(T9);
__ Nop(); // In delay slot.
@@ -7228,13 +7228,13 @@
__ Bind(&skip_call);
} else {
if (isR6) {
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Aui(base, obj, offset_high); // In delay slot.
__ Jialc(T9, thunk_disp);
__ Bind(&skip_call);
} else {
__ Lui(base, offset_high);
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Addiu(T9, T9, thunk_disp); // In delay slot.
__ Jalr(T9);
__ Bind(&skip_call);
@@ -7311,7 +7311,7 @@
// We will not do the explicit null check in the thunk as some form of a null check
// must've been done earlier.
DCHECK(!needs_null_check);
- const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false);
+ const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset= */ false);
// Loading the entrypoint does not require a load acquire since it is only changed when
// threads are suspended or running a checkpoint.
__ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
@@ -7321,13 +7321,13 @@
: index.AsRegister<Register>();
MipsLabel skip_call;
if (GetInstructionSetFeatures().IsR6()) {
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Lsa(TMP, index_reg, obj, scale_factor); // In delay slot.
__ Jialc(T9, thunk_disp);
__ Bind(&skip_call);
} else {
__ Sll(TMP, index_reg, scale_factor);
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Addiu(T9, T9, thunk_disp); // In delay slot.
__ Jalr(T9);
__ Bind(&skip_call);
@@ -7442,7 +7442,7 @@
ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
ref,
obj,
- /* field_offset */ index,
+ /* field_offset= */ index,
temp_reg);
} else {
slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
@@ -7705,7 +7705,7 @@
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ Bne(out, cls.AsRegister<Register>(), slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -7734,7 +7734,7 @@
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -8001,7 +8001,7 @@
NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high);
Register temp_reg = temp.AsRegister<Register>();
EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
- __ Addiu(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+ __ Addiu(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
@@ -8010,7 +8010,7 @@
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
Register temp_reg = temp.AsRegister<Register>();
EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
- __ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+ __ Lw(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
@@ -8020,7 +8020,7 @@
MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high);
Register temp_reg = temp.AsRegister<Register>();
EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
- __ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+ __ Lw(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
@@ -8226,7 +8226,7 @@
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
out,
base_or_current_method_reg);
- __ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label);
+ __ Addiu(out, out, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HLoadClass::LoadKind::kBootImageRelRo: {
@@ -8239,7 +8239,7 @@
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
out,
base_or_current_method_reg);
- __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label);
+ __ Lw(out, out, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HLoadClass::LoadKind::kBssEntry: {
@@ -8253,7 +8253,7 @@
GenerateGcRootFieldLoad(cls,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
read_barrier_option,
&info_low->label);
generate_null_check = true;
@@ -8278,12 +8278,12 @@
cls->GetClass());
bool reordering = __ SetReorder(false);
__ Bind(&info->high_label);
- __ Lui(out, /* placeholder */ 0x1234);
+ __ Lui(out, /* imm16= */ 0x1234);
__ SetReorder(reordering);
GenerateGcRootFieldLoad(cls,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
read_barrier_option,
&info->low_label);
break;
@@ -8432,7 +8432,7 @@
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
out,
base_or_current_method_reg);
- __ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label);
+ __ Addiu(out, out, /* imm16= */ 0x5678, &info_low->label);
return;
}
case HLoadString::LoadKind::kBootImageRelRo: {
@@ -8445,7 +8445,7 @@
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
out,
base_or_current_method_reg);
- __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label);
+ __ Lw(out, out, /* imm16= */ 0x5678, &info_low->label);
return;
}
case HLoadString::LoadKind::kBssEntry: {
@@ -8460,7 +8460,7 @@
GenerateGcRootFieldLoad(load,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS* slow_path =
@@ -8489,12 +8489,12 @@
load->GetString());
bool reordering = __ SetReorder(false);
__ Bind(&info->high_label);
- __ Lui(out, /* placeholder */ 0x1234);
+ __ Lui(out, /* imm16= */ 0x1234);
__ SetReorder(reordering);
GenerateGcRootFieldLoad(load,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
kCompilerReadBarrierOption,
&info->low_label);
return;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 016aac7..8b6328f 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -953,7 +953,7 @@
: CodeGenerator(graph,
kNumberOfGpuRegisters,
kNumberOfFpuRegisters,
- /* number_of_register_pairs */ 0,
+ /* number_of_register_pairs= */ 0,
ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
arraysize(kCoreCalleeSaves)),
ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
@@ -1581,14 +1581,14 @@
uint32_t intrinsic_data,
const PcRelativePatchInfo* info_high) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
+ /* dex_file= */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
}
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageRelRoPatch(
uint32_t boot_image_offset,
const PcRelativePatchInfo* info_high) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
+ /* dex_file= */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
}
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageMethodPatch(
@@ -1665,7 +1665,7 @@
DCHECK(!info_high->patch_info_high);
__ Bind(&info_high->label);
// Add the high half of a 32-bit offset to PC.
- __ Auipc(out, /* placeholder */ 0x1234);
+ __ Auipc(out, /* imm16= */ 0x1234);
// A following instruction will add the sign-extended low half of the 32-bit
// offset to `out` (e.g. ld, jialc, daddiu).
if (info_low != nullptr) {
@@ -1679,13 +1679,13 @@
PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(reg, AT, /* placeholder */ 0x5678);
+ __ Daddiu(reg, AT, /* imm16= */ 0x5678);
} else if (GetCompilerOptions().GetCompilePic()) {
PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
// Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
- __ Lwu(reg, AT, /* placeholder */ 0x5678);
+ __ Lwu(reg, AT, /* imm16= */ 0x5678);
} else {
DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1710,7 +1710,7 @@
PcRelativePatchInfo* info_low =
NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(argument, AT, /* placeholder */ 0x5678);
+ __ Daddiu(argument, AT, /* imm16= */ 0x5678);
} else {
LoadBootImageAddress(argument, boot_image_offset);
}
@@ -1724,7 +1724,7 @@
ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+ [this]() { return __ NewLiteral<uint32_t>(/* value= */ 0u); });
}
Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file,
@@ -1733,7 +1733,7 @@
ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+ [this]() { return __ NewLiteral<uint32_t>(/* value= */ 0u); });
}
void CodeGeneratorMIPS64::PatchJitRootUse(uint8_t* code,
@@ -2458,7 +2458,7 @@
obj,
offset,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
out_loc,
@@ -2466,7 +2466,7 @@
data_offset,
index,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
}
} else {
GpuRegister out = out_loc.AsRegister<GpuRegister>();
@@ -3337,10 +3337,10 @@
switch (type) {
default:
// Integer case.
- GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ false, locations);
+ GenerateIntLongCompare(instruction->GetCondition(), /* is64bit= */ false, locations);
return;
case DataType::Type::kInt64:
- GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ true, locations);
+ GenerateIntLongCompare(instruction->GetCondition(), /* is64bit= */ true, locations);
return;
case DataType::Type::kFloat32:
case DataType::Type::kFloat64:
@@ -4449,10 +4449,10 @@
switch (type) {
default:
- GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ false, locations, branch_target);
+ GenerateIntLongCompareAndBranch(if_cond, /* is64bit= */ false, locations, branch_target);
break;
case DataType::Type::kInt64:
- GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ true, locations, branch_target);
+ GenerateIntLongCompareAndBranch(if_cond, /* is64bit= */ true, locations, branch_target);
break;
case DataType::Type::kFloat32:
case DataType::Type::kFloat64:
@@ -4482,7 +4482,7 @@
nullptr : codegen_->GetLabelOf(true_successor);
Mips64Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -4501,9 +4501,9 @@
SlowPathCodeMIPS64* slow_path =
deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS64>(deoptimize);
GenerateTestAndBranch(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
// This function returns true if a conditional move can be generated for HSelect.
@@ -4517,7 +4517,7 @@
// of common logic.
static bool CanMoveConditionally(HSelect* select, LocationSummary* locations_to_set) {
bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
HCondition* condition = cond->AsCondition();
DataType::Type cond_type =
@@ -4660,7 +4660,7 @@
Location dst = locations->Out();
Location false_src = locations->InAt(0);
Location true_src = locations->InAt(1);
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
GpuRegister cond_reg = TMP;
FpuRegister fcond_reg = FTMP;
DataType::Type cond_type = DataType::Type::kInt32;
@@ -4668,7 +4668,7 @@
DataType::Type dst_type = select->GetType();
if (IsBooleanValueOrMaterializedCondition(cond)) {
- cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<GpuRegister>();
+ cond_reg = locations->InAt(/* at= */ 2).AsRegister<GpuRegister>();
} else {
HCondition* condition = cond->AsCondition();
LocationSummary* cond_locations = cond->GetLocations();
@@ -4677,13 +4677,13 @@
switch (cond_type) {
default:
cond_inverted = MaterializeIntLongCompare(if_cond,
- /* is64bit */ false,
+ /* is64bit= */ false,
cond_locations,
cond_reg);
break;
case DataType::Type::kInt64:
cond_inverted = MaterializeIntLongCompare(if_cond,
- /* is64bit */ true,
+ /* is64bit= */ true,
cond_locations,
cond_reg);
break;
@@ -4826,14 +4826,14 @@
}
void InstructionCodeGeneratorMIPS64::VisitSelect(HSelect* select) {
- if (CanMoveConditionally(select, /* locations_to_set */ nullptr)) {
+ if (CanMoveConditionally(select, /* locations_to_set= */ nullptr)) {
GenConditionalMove(select);
} else {
LocationSummary* locations = select->GetLocations();
Mips64Label false_target;
GenerateTestAndBranch(select,
- /* condition_input_index */ 2,
- /* true_target */ nullptr,
+ /* condition_input_index= */ 2,
+ /* true_target= */ nullptr,
&false_target);
codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
__ Bind(&false_target);
@@ -4945,7 +4945,7 @@
obj,
offset,
temp_loc,
- /* needs_null_check */ true);
+ /* needs_null_check= */ true);
if (is_volatile) {
GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -5101,7 +5101,7 @@
out_reg,
offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -5142,7 +5142,7 @@
obj_reg,
offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -5230,7 +5230,7 @@
__ Daui(base, obj, offset_high);
}
Mips64Label skip_call;
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
if (label_low != nullptr) {
DCHECK(short_offset);
__ Bind(label_low);
@@ -5360,7 +5360,7 @@
GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
Mips64Label skip_call;
if (short_offset) {
- __ Beqzc(T9, &skip_call, /* is_bare */ true);
+ __ Beqzc(T9, &skip_call, /* is_bare= */ true);
__ Nop(); // In forbidden slot.
__ Jialc(T9, thunk_disp);
__ Bind(&skip_call);
@@ -5369,7 +5369,7 @@
} else {
int16_t offset_low = Low16Bits(offset);
int16_t offset_high = High16Bits(offset - offset_low); // Accounts for sign extension in lwu.
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Daui(TMP, obj, offset_high); // In delay slot.
__ Jialc(T9, thunk_disp);
__ Bind(&skip_call);
@@ -5442,12 +5442,12 @@
// We will not do the explicit null check in the thunk as some form of a null check
// must've been done earlier.
DCHECK(!needs_null_check);
- const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false);
+ const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset= */ false);
// Loading the entrypoint does not require a load acquire since it is only changed when
// threads are suspended or running a checkpoint.
__ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
Mips64Label skip_call;
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
GpuRegister index_reg = index.AsRegister<GpuRegister>();
__ Dlsa(TMP, index_reg, obj, scale_factor); // In delay slot.
@@ -5558,7 +5558,7 @@
ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
ref,
obj,
- /* field_offset */ index,
+ /* field_offset= */ index,
temp_reg);
} else {
slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
@@ -5821,7 +5821,7 @@
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ Bnec(out, cls.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -5850,7 +5850,7 @@
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
break;
@@ -6092,7 +6092,7 @@
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ __ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
@@ -6101,7 +6101,7 @@
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
// Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
- __ Lwu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ __ Lwu(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
@@ -6110,7 +6110,7 @@
PcRelativePatchInfo* info_low = NewMethodBssEntryPatch(
MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Ld(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ __ Ld(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
@@ -6280,7 +6280,7 @@
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex(), info_high);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(out, AT, /* placeholder */ 0x5678);
+ __ Daddiu(out, AT, /* imm16= */ 0x5678);
break;
}
case HLoadClass::LoadKind::kBootImageRelRo: {
@@ -6291,7 +6291,7 @@
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Lwu(out, AT, /* placeholder */ 0x5678);
+ __ Lwu(out, AT, /* imm16= */ 0x5678);
break;
}
case HLoadClass::LoadKind::kBssEntry: {
@@ -6303,7 +6303,7 @@
GenerateGcRootFieldLoad(cls,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
read_barrier_option,
&info_low->label);
generate_null_check = true;
@@ -6427,7 +6427,7 @@
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(out, AT, /* placeholder */ 0x5678);
+ __ Daddiu(out, AT, /* imm16= */ 0x5678);
return;
}
case HLoadString::LoadKind::kBootImageRelRo: {
@@ -6438,7 +6438,7 @@
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Lwu(out, AT, /* placeholder */ 0x5678);
+ __ Lwu(out, AT, /* imm16= */ 0x5678);
return;
}
case HLoadString::LoadKind::kBssEntry: {
@@ -6451,7 +6451,7 @@
GenerateGcRootFieldLoad(load,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS64* slow_path =
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index 09e96cc..4e9ba0d 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -74,19 +74,19 @@
__ InsertW(static_cast<VectorRegister>(FTMP),
locations->InAt(0).AsRegisterPairHigh<Register>(),
1);
- __ ReplicateFPToVectorRegister(dst, FTMP, /* is_double */ true);
+ __ ReplicateFPToVectorRegister(dst, FTMP, /* is_double= */ true);
break;
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
__ ReplicateFPToVectorRegister(dst,
locations->InAt(0).AsFpuRegister<FRegister>(),
- /* is_double */ false);
+ /* is_double= */ false);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
__ ReplicateFPToVectorRegister(dst,
locations->InAt(0).AsFpuRegister<FRegister>(),
- /* is_double */ true);
+ /* is_double= */ true);
break;
default:
LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -1344,7 +1344,7 @@
}
void LocationsBuilderMIPS::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ true);
}
void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) {
@@ -1387,7 +1387,7 @@
}
void LocationsBuilderMIPS::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ false);
}
void InstructionCodeGeneratorMIPS::VisitVecStore(HVecStore* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index b6873b1..6467d3e 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -79,13 +79,13 @@
DCHECK_EQ(4u, instruction->GetVectorLength());
__ ReplicateFPToVectorRegister(dst,
locations->InAt(0).AsFpuRegister<FpuRegister>(),
- /* is_double */ false);
+ /* is_double= */ false);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
__ ReplicateFPToVectorRegister(dst,
locations->InAt(0).AsFpuRegister<FpuRegister>(),
- /* is_double */ true);
+ /* is_double= */ true);
break;
default:
LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -1342,7 +1342,7 @@
}
void LocationsBuilderMIPS64::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ true);
}
void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) {
@@ -1385,7 +1385,7 @@
}
void LocationsBuilderMIPS64::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ false);
}
void InstructionCodeGeneratorMIPS64::VisitVecStore(HVecStore* instruction) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1b74d22..766ff78 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1720,7 +1720,7 @@
nullptr : codegen_->GetLabelOf(true_successor);
Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -1738,9 +1738,9 @@
void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86>(deoptimize);
GenerateTestAndBranch<Label>(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
void LocationsBuilderX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -1863,7 +1863,7 @@
} else {
NearLabel false_target;
GenerateTestAndBranch<NearLabel>(
- select, /* condition_input_index */ 2, /* true_target */ nullptr, &false_target);
+ select, /* condition_input_index= */ 2, /* true_target= */ nullptr, &false_target);
codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
__ Bind(&false_target);
}
@@ -3434,8 +3434,8 @@
// Load the values to the FP stack in reverse order, using temporaries if needed.
const bool is_wide = !is_float;
- PushOntoFPStack(second, elem_size, 2 * elem_size, /* is_fp */ true, is_wide);
- PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp */ true, is_wide);
+ PushOntoFPStack(second, elem_size, 2 * elem_size, /* is_fp= */ true, is_wide);
+ PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp= */ true, is_wide);
// Loop doing FPREM until we stabilize.
NearLabel retry;
@@ -3572,7 +3572,7 @@
int64_t magic;
int shift;
- CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
// Save the numerator.
__ movl(num, eax);
@@ -4801,7 +4801,7 @@
}
case MemBarrierKind::kNTStoreStore:
// Non-Temporal Store/Store needs an explicit fence.
- MemoryFence(/* non-temporal */ true);
+ MemoryFence(/* non_temporal= */ true);
break;
}
}
@@ -4936,14 +4936,14 @@
void CodeGeneratorX86::RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address,
uint32_t intrinsic_data) {
boot_image_intrinsic_patches_.emplace_back(
- method_address, /* target_dex_file */ nullptr, intrinsic_data);
+ method_address, /* target_dex_file= */ nullptr, intrinsic_data);
__ Bind(&boot_image_intrinsic_patches_.back().label);
}
void CodeGeneratorX86::RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address,
uint32_t boot_image_offset) {
boot_image_method_patches_.emplace_back(
- method_address, /* target_dex_file */ nullptr, boot_image_offset);
+ method_address, /* target_dex_file= */ nullptr, boot_image_offset);
__ Bind(&boot_image_method_patches_.back().label);
}
@@ -5237,7 +5237,7 @@
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, base, offset, /* needs_null_check */ true);
+ instruction, out, base, offset, /* needs_null_check= */ true);
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -5720,7 +5720,7 @@
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier call.
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
+ instruction, out_loc, obj, data_offset, index, /* needs_null_check= */ true);
} else {
Register out = out_loc.AsRegister<Register>();
__ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
@@ -6582,7 +6582,7 @@
cls,
out_loc,
Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
- /* fixup_label */ nullptr,
+ /* fixup_label= */ nullptr,
read_barrier_option);
break;
}
@@ -7109,7 +7109,7 @@
}
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -7141,7 +7141,7 @@
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -7650,7 +7650,7 @@
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, out_reg, offset, /* needs_null_check */ false);
+ instruction, out, out_reg, offset, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -7684,7 +7684,7 @@
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, obj_reg, offset, /* needs_null_check */ false);
+ instruction, out, obj_reg, offset, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -7733,7 +7733,7 @@
// Slow path marking the GC root `root`.
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
- instruction, root, /* unpoison_ref_before_marking */ false);
+ instruction, root, /* unpoison_ref_before_marking= */ false);
codegen_->AddSlowPath(slow_path);
// Test the entrypoint (`Thread::Current()->pReadBarrierMarkReg ## root.reg()`).
@@ -7863,10 +7863,10 @@
if (always_update_field) {
DCHECK(temp != nullptr);
slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
- instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp);
+ instruction, ref, obj, src, /* unpoison_ref_before_marking= */ true, *temp);
} else {
slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
- instruction, ref, /* unpoison_ref_before_marking */ true);
+ instruction, ref, /* unpoison_ref_before_marking= */ true);
}
AddSlowPath(slow_path);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 781f272..67a2aa5 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -992,7 +992,7 @@
// temp = thread->string_init_entrypoint
uint32_t offset =
GetThreadOffset<kX86_64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
- __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip */ true));
+ __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip= */ true));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
@@ -1001,19 +1001,19 @@
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative:
DCHECK(GetCompilerOptions().IsBootImage());
__ leal(temp.AsRegister<CpuRegister>(),
- Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+ Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
RecordBootImageMethodPatch(invoke);
break;
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
// Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
__ movl(temp.AsRegister<CpuRegister>(),
- Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+ Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
RecordBootImageRelRoPatch(GetBootImageOffset(invoke));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
__ movq(temp.AsRegister<CpuRegister>(),
- Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+ Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
RecordMethodBssEntryPatch(invoke);
break;
}
@@ -1076,12 +1076,12 @@
}
void CodeGeneratorX86_64::RecordBootImageIntrinsicPatch(uint32_t intrinsic_data) {
- boot_image_intrinsic_patches_.emplace_back(/* target_dex_file */ nullptr, intrinsic_data);
+ boot_image_intrinsic_patches_.emplace_back(/* target_dex_file= */ nullptr, intrinsic_data);
__ Bind(&boot_image_intrinsic_patches_.back().label);
}
void CodeGeneratorX86_64::RecordBootImageRelRoPatch(uint32_t boot_image_offset) {
- boot_image_method_patches_.emplace_back(/* target_dex_file */ nullptr, boot_image_offset);
+ boot_image_method_patches_.emplace_back(/* target_dex_file= */ nullptr, boot_image_offset);
__ Bind(&boot_image_method_patches_.back().label);
}
@@ -1123,10 +1123,10 @@
void CodeGeneratorX86_64::LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_reference) {
if (GetCompilerOptions().IsBootImage()) {
- __ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
RecordBootImageIntrinsicPatch(boot_image_reference);
} else if (GetCompilerOptions().GetCompilePic()) {
- __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
RecordBootImageRelRoPatch(boot_image_reference);
} else {
DCHECK(Runtime::Current()->UseJitCompilation());
@@ -1146,7 +1146,7 @@
DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
// Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
__ leal(argument,
- Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
MethodReference target_method = invoke->GetTargetMethod();
dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
boot_image_type_patches_.emplace_back(target_method.dex_file, type_idx.index_);
@@ -1277,7 +1277,7 @@
}
void CodeGeneratorX86_64::GenerateInvokeRuntime(int32_t entry_point_offset) {
- __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip */ true));
+ __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip= */ true));
}
static constexpr int kNumberOfCpuRegisterPairs = 0;
@@ -1799,7 +1799,7 @@
nullptr : codegen_->GetLabelOf(true_successor);
Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -1817,9 +1817,9 @@
void InstructionCodeGeneratorX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86_64>(deoptimize);
GenerateTestAndBranch<Label>(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
void LocationsBuilderX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -1922,8 +1922,8 @@
} else {
NearLabel false_target;
GenerateTestAndBranch<NearLabel>(select,
- /* condition_input_index */ 2,
- /* true_target */ nullptr,
+ /* condition_input_index= */ 2,
+ /* true_target= */ nullptr,
&false_target);
codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
__ Bind(&false_target);
@@ -3679,7 +3679,7 @@
if (instruction->GetResultType() == DataType::Type::kInt32) {
int imm = second.GetConstant()->AsIntConstant()->GetValue();
- CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
__ movl(numerator, eax);
@@ -3716,7 +3716,7 @@
CpuRegister rax = eax;
CpuRegister rdx = edx;
- CalculateMagicAndShiftForDivRem(imm, true /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, /* is_long= */ true, &magic, &shift);
// Save the numerator.
__ movq(numerator, rax);
@@ -4554,7 +4554,7 @@
}
case MemBarrierKind::kNTStoreStore:
// Non-Temporal Store/Store needs an explicit fence.
- MemoryFence(/* non-temporal */ true);
+ MemoryFence(/* non_temporal= */ true);
break;
}
}
@@ -4631,7 +4631,7 @@
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, base, offset, /* needs_null_check */ true);
+ instruction, out, base, offset, /* needs_null_check= */ true);
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -5086,7 +5086,7 @@
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier call.
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
+ instruction, out_loc, obj, data_offset, index, /* needs_null_check= */ true);
} else {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
__ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
@@ -5486,7 +5486,7 @@
}
// Load the address of the card table into `card`.
__ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64PointerSize>().Int32Value(),
- /* no_rip */ true));
+ /* no_rip= */ true));
// Calculate the offset (in the card table) of the card corresponding to
// `object`.
__ movq(temp, object);
@@ -5566,7 +5566,7 @@
}
__ gs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64PointerSize>().Int32Value(),
- /* no_rip */ true),
+ /* no_rip= */ true),
Immediate(0));
if (successor == nullptr) {
__ j(kNotEqual, slow_path->GetEntryLabel());
@@ -5948,25 +5948,25 @@
cls,
out_loc,
Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
- /* fixup_label */ nullptr,
+ /* fixup_label= */ nullptr,
read_barrier_option);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
codegen_->RecordBootImageTypePatch(cls);
break;
case HLoadClass::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
codegen_->RecordBootImageRelRoPatch(codegen_->GetBootImageOffset(cls));
break;
}
case HLoadClass::LoadKind::kBssEntry: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ false);
+ /* no_rip= */ false);
Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
@@ -5982,7 +5982,7 @@
}
case HLoadClass::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ true);
+ /* no_rip= */ true);
Label* fixup_label =
codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
// /* GcRoot<mirror::Class> */ out = *address
@@ -6107,19 +6107,19 @@
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
- __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
codegen_->RecordBootImageStringPatch(load);
return;
}
case HLoadString::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
codegen_->RecordBootImageRelRoPatch(codegen_->GetBootImageOffset(load));
return;
}
case HLoadString::LoadKind::kBssEntry: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ false);
+ /* no_rip= */ false);
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
@@ -6138,7 +6138,7 @@
}
case HLoadString::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ true);
+ /* no_rip= */ true);
Label* fixup_label = codegen_->NewJitRootStringPatch(
load->GetDexFile(), load->GetStringIndex(), load->GetString());
// /* GcRoot<mirror::String> */ out = *address
@@ -6160,7 +6160,7 @@
static Address GetExceptionTlsAddress() {
return Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>().Int32Value(),
- /* no_rip */ true);
+ /* no_rip= */ true);
}
void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
@@ -6435,7 +6435,7 @@
}
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -6467,7 +6467,7 @@
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -6954,7 +6954,7 @@
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, out_reg, offset, /* needs_null_check */ false);
+ instruction, out, out_reg, offset, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -6988,7 +6988,7 @@
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, obj_reg, offset, /* needs_null_check */ false);
+ instruction, out, obj_reg, offset, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -7037,13 +7037,13 @@
// Slow path marking the GC root `root`.
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
- instruction, root, /* unpoison_ref_before_marking */ false);
+ instruction, root, /* unpoison_ref_before_marking= */ false);
codegen_->AddSlowPath(slow_path);
// Test the `Thread::Current()->pReadBarrierMarkReg ## root.reg()` entrypoint.
const int32_t entry_point_offset =
Thread::ReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(root.reg());
- __ gs()->cmpl(Address::Absolute(entry_point_offset, /* no_rip */ true), Immediate(0));
+ __ gs()->cmpl(Address::Absolute(entry_point_offset, /* no_rip= */ true), Immediate(0));
// The entrypoint is null when the GC is not marking.
__ j(kNotEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -7169,10 +7169,10 @@
DCHECK(temp1 != nullptr);
DCHECK(temp2 != nullptr);
slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
- instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2);
+ instruction, ref, obj, src, /* unpoison_ref_before_marking= */ true, *temp1, *temp2);
} else {
slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
- instruction, ref, /* unpoison_ref_before_marking */ true);
+ instruction, ref, /* unpoison_ref_before_marking= */ true);
}
AddSlowPath(slow_path);
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index d6c9755..f406983 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -180,7 +180,7 @@
DCHECK(!instruction->IsPhi()); // Makes no sense for Phi.
// Find the target block.
- CommonDominator finder(/* start_block */ nullptr);
+ CommonDominator finder(/* block= */ nullptr);
for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
HInstruction* user = use.GetUser();
if (!(filter && ShouldFilterUse(instruction, user, post_dominated))) {
@@ -259,12 +259,12 @@
size_t number_of_instructions = graph_->GetCurrentInstructionId();
ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
- ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false);
+ ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable= */ false);
processed_instructions.ClearAllBits();
- ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false);
+ ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable= */ false);
post_dominated.ClearAllBits();
ArenaBitVector instructions_that_can_move(
- &allocator, number_of_instructions, /* expandable */ false);
+ &allocator, number_of_instructions, /* expandable= */ false);
instructions_that_can_move.ClearAllBits();
ScopedArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
@@ -414,7 +414,7 @@
}
// Find the position of the instruction we're storing into, filtering out this
// store and all other stores to that instruction.
- position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter */ true);
+ position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter= */ true);
// The position needs to be dominated by the store, in order for the store to move there.
if (position == nullptr || !instruction->GetBlock()->Dominates(position->GetBlock())) {
@@ -434,7 +434,7 @@
continue;
}
MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSunk);
- instruction->MoveBefore(position, /* ensure_safety */ false);
+ instruction->MoveBefore(position, /* do_checks= */ false);
}
}
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index b1436f8..74d9d3a 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -70,7 +70,7 @@
check_after_cf(graph_);
- HDeadCodeElimination(graph_, nullptr /* stats */, "dead_code_elimination").Run();
+ HDeadCodeElimination(graph_, /* stats= */ nullptr, "dead_code_elimination").Run();
GraphChecker graph_checker_dce(graph_);
graph_checker_dce.Run();
ASSERT_TRUE(graph_checker_dce.IsValid());
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 2774535..f5cd4dc 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -43,7 +43,7 @@
std::string actual_before = printer_before.str();
ASSERT_EQ(actual_before, expected_before);
- HDeadCodeElimination(graph, nullptr /* stats */, "dead_code_elimination").Run();
+ HDeadCodeElimination(graph, /* stats= */ nullptr, "dead_code_elimination").Run();
GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index a689f35..01d9603 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -635,8 +635,8 @@
}
}
CheckTypeCheckBitstringInput(
- check, /* input_pos */ 2, check_values, expected_path_to_root, "path_to_root");
- CheckTypeCheckBitstringInput(check, /* input_pos */ 3, check_values, expected_mask, "mask");
+ check, /* input_pos= */ 2, check_values, expected_path_to_root, "path_to_root");
+ CheckTypeCheckBitstringInput(check, /* input_pos= */ 3, check_values, expected_mask, "mask");
} else {
if (!input->IsLoadClass()) {
AddError(StringPrintf("%s:%d (classic) expects a HLoadClass as second input, not %s:%d.",
@@ -931,7 +931,7 @@
// because the BitVector reallocation strategy has very bad worst-case behavior.
ArenaBitVector visited(&allocator,
GetGraph()->GetCurrentInstructionId(),
- /* expandable */ false,
+ /* expandable= */ false,
kArenaAllocGraphChecker);
visited.ClearAllBits();
if (!IsConstantEquivalent(phi, other_phi, &visited)) {
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index a1af2be..2a7bbcb 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -130,10 +130,10 @@
// been generated, so we can read data in literal pools.
disassembler_ = std::unique_ptr<Disassembler>((*create_disassembler)(
instruction_set,
- new DisassemblerOptions(/* absolute_addresses */ false,
+ new DisassemblerOptions(/* absolute_addresses= */ false,
base_address,
end_address,
- /* can_read_literals */ true,
+ /* can_read_literals= */ true,
Is64BitInstructionSet(instruction_set)
? &Thread::DumpThreadOffset<PointerSize::k64>
: &Thread::DumpThreadOffset<PointerSize::k32>)));
@@ -393,7 +393,7 @@
void VisitLoadMethodType(HLoadMethodType* load_method_type) override {
StartAttributeStream("load_kind") << "RuntimeCall";
const DexFile& dex_file = load_method_type->GetDexFile();
- const DexFile::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex());
+ const dex::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex());
StartAttributeStream("method_type") << dex_file.GetProtoSignature(proto_id);
}
@@ -924,8 +924,8 @@
HGraphVisualizerPrinter printer(graph_,
*output_,
"disassembly",
- /* is_after_pass */ true,
- /* graph_in_bad_state */ false,
+ /* is_after_pass= */ true,
+ /* graph_in_bad_state= */ false,
codegen_,
codegen_.GetDisassemblyInformation());
printer.Run();
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index e6b6326..3689d1d 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -348,7 +348,7 @@
side_effects_(side_effects),
sets_(graph->GetBlocks().size(), nullptr, allocator_.Adapter(kArenaAllocGvn)),
visited_blocks_(
- &allocator_, graph->GetBlocks().size(), /* expandable */ false, kArenaAllocGvn) {
+ &allocator_, graph->GetBlocks().size(), /* expandable= */ false, kArenaAllocGvn) {
visited_blocks_.ClearAllBits();
}
@@ -546,12 +546,12 @@
// that is larger, we return it if no perfectly-matching set is found.
// Note that we defer testing WillBeReferencedAgain until all other criteria
// have been satisfied because it might be expensive.
- if (current_set->CanHoldCopyOf(reference_set, /* exact_match */ true)) {
+ if (current_set->CanHoldCopyOf(reference_set, /* exact_match= */ true)) {
if (!WillBeReferencedAgain(current_block)) {
return current_block;
}
} else if (secondary_match == nullptr &&
- current_set->CanHoldCopyOf(reference_set, /* exact_match */ false)) {
+ current_set->CanHoldCopyOf(reference_set, /* exact_match= */ false)) {
if (!WillBeReferencedAgain(current_block)) {
secondary_match = current_block;
}
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 55eca23..4c78fa8 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -216,13 +216,13 @@
chase_hint_ = chase_hint;
bool in_body = context->GetBlock() != loop->GetHeader();
int64_t stride_value = 0;
- *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true));
- *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false), chase_hint);
+ *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min= */ true));
+ *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min= */ false), chase_hint);
*needs_finite_test = NeedsTripCount(info, &stride_value) && IsUnsafeTripCount(trip);
chase_hint_ = nullptr;
// Retry chasing constants for wrap-around (merge sensitive).
if (!min_val->is_known && info->induction_class == HInductionVarAnalysis::kWrapAround) {
- *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true));
+ *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min= */ true));
}
return true;
}
@@ -445,8 +445,8 @@
}
// Try range analysis on the invariant, only accept a proper range
// to avoid arithmetic wrap-around anomalies.
- Value min_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ true);
- Value max_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ false);
+ Value min_val = GetVal(info, nullptr, /* in_body= */ true, /* is_min= */ true);
+ Value max_val = GetVal(info, nullptr, /* in_body= */ true, /* is_min= */ false);
if (IsConstantValue(min_val) &&
IsConstantValue(max_val) && min_val.b_constant <= max_val.b_constant) {
if ((request == kExact && min_val.b_constant == max_val.b_constant) || request == kAtMost) {
@@ -791,10 +791,10 @@
return MulRangeAndConstant(value, info1, trip, in_body, is_min);
}
// Interval ranges.
- Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true);
- Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
- Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
- Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
+ Value v1_min = GetVal(info1, trip, in_body, /* is_min= */ true);
+ Value v1_max = GetVal(info1, trip, in_body, /* is_min= */ false);
+ Value v2_min = GetVal(info2, trip, in_body, /* is_min= */ true);
+ Value v2_max = GetVal(info2, trip, in_body, /* is_min= */ false);
// Positive range vs. positive or negative range.
if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) {
if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
@@ -825,10 +825,10 @@
return DivRangeAndConstant(value, info1, trip, in_body, is_min);
}
// Interval ranges.
- Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true);
- Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
- Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
- Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
+ Value v1_min = GetVal(info1, trip, in_body, /* is_min= */ true);
+ Value v1_max = GetVal(info1, trip, in_body, /* is_min= */ false);
+ Value v2_min = GetVal(info2, trip, in_body, /* is_min= */ true);
+ Value v2_max = GetVal(info2, trip, in_body, /* is_min= */ false);
// Positive range vs. positive or negative range.
if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) {
if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
@@ -1019,10 +1019,10 @@
// Code generation for taken test: generate the code when requested or otherwise analyze
// if code generation is feasible when taken test is needed.
if (taken_test != nullptr) {
- return GenerateCode(trip->op_b, nullptr, graph, block, taken_test, in_body, /* is_min */ false);
+ return GenerateCode(trip->op_b, nullptr, graph, block, taken_test, in_body, /* is_min= */ false);
} else if (*needs_taken_test) {
if (!GenerateCode(
- trip->op_b, nullptr, nullptr, nullptr, nullptr, in_body, /* is_min */ false)) {
+ trip->op_b, nullptr, nullptr, nullptr, nullptr, in_body, /* is_min= */ false)) {
return false;
}
}
@@ -1030,9 +1030,9 @@
return
// Success on lower if invariant (not set), or code can be generated.
((info->induction_class == HInductionVarAnalysis::kInvariant) ||
- GenerateCode(info, trip, graph, block, lower, in_body, /* is_min */ true)) &&
+ GenerateCode(info, trip, graph, block, lower, in_body, /* is_min= */ true)) &&
// And success on upper.
- GenerateCode(info, trip, graph, block, upper, in_body, /* is_min */ false);
+ GenerateCode(info, trip, graph, block, upper, in_body, /* is_min= */ false);
}
bool InductionVarRange::GenerateLastValuePolynomial(HInductionVarAnalysis::InductionInfo* info,
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index 223e08e..f6af384 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -252,24 +252,24 @@
Value GetMin(HInductionVarAnalysis::InductionInfo* info,
HInductionVarAnalysis::InductionInfo* trip) {
- return range_.GetVal(info, trip, /* in_body */ true, /* is_min */ true);
+ return range_.GetVal(info, trip, /* in_body= */ true, /* is_min= */ true);
}
Value GetMax(HInductionVarAnalysis::InductionInfo* info,
HInductionVarAnalysis::InductionInfo* trip) {
- return range_.GetVal(info, trip, /* in_body */ true, /* is_min */ false);
+ return range_.GetVal(info, trip, /* in_body= */ true, /* is_min= */ false);
}
Value GetMul(HInductionVarAnalysis::InductionInfo* info1,
HInductionVarAnalysis::InductionInfo* info2,
bool is_min) {
- return range_.GetMul(info1, info2, nullptr, /* in_body */ true, is_min);
+ return range_.GetMul(info1, info2, nullptr, /* in_body= */ true, is_min);
}
Value GetDiv(HInductionVarAnalysis::InductionInfo* info1,
HInductionVarAnalysis::InductionInfo* info2,
bool is_min) {
- return range_.GetDiv(info1, info2, nullptr, /* in_body */ true, is_min);
+ return range_.GetDiv(info1, info2, nullptr, /* in_body= */ true, is_min);
}
Value GetRem(HInductionVarAnalysis::InductionInfo* info1,
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 417d794..8440e9a 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -175,7 +175,7 @@
if (honor_noinline_directives) {
// Debugging case: directives in method names control or assert on inlining.
std::string callee_name = outer_compilation_unit_.GetDexFile()->PrettyMethod(
- call->GetDexMethodIndex(), /* with_signature */ false);
+ call->GetDexMethodIndex(), /* with_signature= */ false);
// Tests prevent inlining by having $noinline$ in their method names.
if (callee_name.find("$noinline$") == std::string::npos) {
if (TryInline(call)) {
@@ -504,7 +504,7 @@
bool result = TryInlineAndReplace(invoke_instruction,
actual_method,
ReferenceTypeInfo::CreateInvalid(),
- /* do_rtp */ true,
+ /* do_rtp= */ true,
cha_devirtualize);
if (result) {
// Successfully inlined.
@@ -858,9 +858,9 @@
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
if (!TryInlineAndReplace(invoke_instruction,
resolved_method,
- ReferenceTypeInfo::Create(monomorphic_type, /* is_exact */ true),
- /* do_rtp */ false,
- /* cha_devirtualize */ false)) {
+ ReferenceTypeInfo::Create(monomorphic_type, /* is_exact= */ true),
+ /* do_rtp= */ false,
+ /* cha_devirtualize= */ false)) {
return false;
}
@@ -871,7 +871,7 @@
class_index,
monomorphic_type,
invoke_instruction,
- /* with_deoptimization */ true);
+ /* with_deoptimization= */ true);
// Run type propagation to get the guard typed, and eventually propagate the
// type of the receiver.
@@ -879,7 +879,7 @@
outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
- /* is_first_run */ false);
+ /* is_first_run= */ false);
rtp_fixup.Run();
MaybeRecordStat(stats_, MethodCompilationStat::kInlinedMonomorphicCall);
@@ -949,7 +949,7 @@
klass,
is_referrer,
invoke_instruction->GetDexPc(),
- /* needs_access_check */ false);
+ /* needs_access_check= */ false);
HLoadClass::LoadKind kind = HSharpening::ComputeLoadClassKind(
load_class, codegen_, caller_compilation_unit_);
DCHECK(kind != HLoadClass::LoadKind::kInvalid)
@@ -1027,7 +1027,7 @@
if (!class_index.IsValid() ||
!TryBuildAndInline(invoke_instruction,
method,
- ReferenceTypeInfo::Create(handle, /* is_exact */ true),
+ ReferenceTypeInfo::Create(handle, /* is_exact= */ true),
&return_replacement)) {
all_targets_inlined = false;
} else {
@@ -1079,7 +1079,7 @@
outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
- /* is_first_run */ false);
+ /* is_first_run= */ false);
rtp_fixup.Run();
return true;
}
@@ -1150,14 +1150,14 @@
graph_->UpdateLoopAndTryInformationOfNewBlock(
- then, original_invoke_block, /* replace_if_back_edge */ false);
+ then, original_invoke_block, /* replace_if_back_edge= */ false);
graph_->UpdateLoopAndTryInformationOfNewBlock(
- otherwise, original_invoke_block, /* replace_if_back_edge */ false);
+ otherwise, original_invoke_block, /* replace_if_back_edge= */ false);
// In case the original invoke location was a back edge, we need to update
// the loop to now have the merge block as a back edge.
graph_->UpdateLoopAndTryInformationOfNewBlock(
- merge, original_invoke_block, /* replace_if_back_edge */ true);
+ merge, original_invoke_block, /* replace_if_back_edge= */ true);
}
bool HInliner::TryInlinePolymorphicCallToSameTarget(
@@ -1275,7 +1275,7 @@
outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
- /* is_first_run */ false);
+ /* is_first_run= */ false);
rtp_fixup.Run();
MaybeRecordStat(stats_, MethodCompilationStat::kInlinedPolymorphicCall);
@@ -1399,7 +1399,7 @@
outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
- /* is_first_run */ false).Run();
+ /* is_first_run= */ false).Run();
}
return true;
}
@@ -1625,7 +1625,8 @@
[](uint16_t index) { return index != DexFile::kDexNoIndex16; }));
// Create HInstanceFieldSet for each IPUT that stores non-zero data.
- HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, /* this */ 0u);
+ HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction,
+ /* arg_vreg_index= */ 0u);
bool needs_constructor_barrier = false;
for (size_t i = 0; i != number_of_iputs; ++i) {
HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, iput_args[i]);
@@ -1667,7 +1668,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ArtField* resolved_field =
- class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
+ class_linker->LookupResolvedField(field_index, referrer, /* is_static= */ false);
DCHECK(resolved_field != nullptr);
HInstanceFieldGet* iget = new (graph_->GetAllocator()) HInstanceFieldGet(
obj,
@@ -1680,7 +1681,7 @@
*referrer->GetDexFile(),
// Read barrier generates a runtime call in slow path and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
- /* dex_pc */ 0);
+ /* dex_pc= */ 0);
if (iget->GetType() == DataType::Type::kReference) {
// Use the same dex_cache that we used for field lookup as the hint_dex_cache.
Handle<mirror::DexCache> dex_cache = handles_->NewHandle(referrer->GetDexCache());
@@ -1688,7 +1689,7 @@
outer_compilation_unit_.GetClassLoader(),
dex_cache,
handles_,
- /* is_first_run */ false);
+ /* is_first_run= */ false);
rtp.Visit(iget);
}
return iget;
@@ -1702,7 +1703,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ArtField* resolved_field =
- class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
+ class_linker->LookupResolvedField(field_index, referrer, /* is_static= */ false);
DCHECK(resolved_field != nullptr);
if (is_final != nullptr) {
// This information is needed only for constructors.
@@ -1721,7 +1722,7 @@
*referrer->GetDexFile(),
// Read barrier generates a runtime call in slow path and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
- /* dex_pc */ 0);
+ /* dex_pc= */ 0);
return iput;
}
@@ -1755,7 +1756,7 @@
HInstruction** return_replacement) {
DCHECK(!(resolved_method->IsStatic() && receiver_type.IsValid()));
ScopedObjectAccess soa(Thread::Current());
- const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
+ const dex::CodeItem* code_item = resolved_method->GetCodeItem();
const DexFile& callee_dex_file = *resolved_method->GetDexFile();
uint32_t method_index = resolved_method->GetDexMethodIndex();
CodeItemDebugInfoAccessor code_item_accessor(resolved_method->DexInstructionDebugInfo());
@@ -1777,7 +1778,7 @@
resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
method_index,
resolved_method->GetAccessFlags(),
- /* verified_method */ nullptr,
+ /* verified_method= */ nullptr,
dex_cache,
compiling_class);
@@ -1797,7 +1798,7 @@
codegen_->GetCompilerOptions().GetInstructionSet(),
invoke_type,
graph_->IsDebuggable(),
- /* osr */ false,
+ /* osr= */ false,
caller_instruction_counter);
callee_graph->SetArtMethod(resolved_method);
@@ -1878,7 +1879,7 @@
outer_compilation_unit_.GetClassLoader(),
dex_compilation_unit.GetDexCache(),
handles_,
- /* is_first_run */ false).Run();
+ /* is_first_run= */ false).Run();
}
RunOptimizations(callee_graph, code_item, dex_compilation_unit);
@@ -2026,7 +2027,7 @@
}
void HInliner::RunOptimizations(HGraph* callee_graph,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
const DexCompilationUnit& dex_compilation_unit) {
// Note: if the outermost_graph_ is being compiled OSR, we should not run any
// optimization that could lead to a HDeoptimize. The following optimizations do not.
@@ -2102,7 +2103,7 @@
// is more specific than the class which declares the method.
if (!resolved_method->IsStatic()) {
if (IsReferenceTypeRefinement(GetClassRTI(resolved_method->GetDeclaringClass()),
- /* declared_can_be_null */ false,
+ /* declared_can_be_null= */ false,
invoke_instruction->InputAt(0u))) {
return true;
}
@@ -2111,7 +2112,7 @@
// Iterate over the list of parameter types and test whether any of the
// actual inputs has a more specific reference type than the type declared in
// the signature.
- const DexFile::TypeList* param_list = resolved_method->GetParameterTypeList();
+ const dex::TypeList* param_list = resolved_method->GetParameterTypeList();
for (size_t param_idx = 0,
input_idx = resolved_method->IsStatic() ? 0 : 1,
e = (param_list == nullptr ? 0 : param_list->Size());
@@ -2122,7 +2123,7 @@
ObjPtr<mirror::Class> param_cls = resolved_method->LookupResolvedClassFromTypeIndex(
param_list->GetTypeItem(param_idx).type_idx_);
if (IsReferenceTypeRefinement(GetClassRTI(param_cls),
- /* declared_can_be_null */ true,
+ /* declared_can_be_null= */ true,
input)) {
return true;
}
@@ -2139,7 +2140,7 @@
if (return_replacement->GetType() == DataType::Type::kReference) {
// Test if the return type is a refinement of the declared return type.
if (IsReferenceTypeRefinement(invoke_instruction->GetReferenceTypeInfo(),
- /* declared_can_be_null */ true,
+ /* declared_can_be_null= */ true,
return_replacement)) {
return true;
} else if (return_replacement->IsInstanceFieldGet()) {
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 8ac2163..efd4c74 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -99,7 +99,7 @@
// Run simple optimizations on `callee_graph`.
void RunOptimizations(HGraph* callee_graph,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
const DexCompilationUnit& dex_compilation_unit)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 448fed9..5e7b575 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -434,7 +434,7 @@
HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall,
HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- /* method_load_data */ 0u
+ /* method_load_data= */ 0u
};
InvokeType invoke_type = dex_compilation_unit_->IsStatic() ? kStatic : kDirect;
HInvokeStaticOrDirect* invoke = new (allocator_) HInvokeStaticOrDirect(
@@ -449,7 +449,7 @@
target_method,
HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
RangeInstructionOperands operands(graph_->GetNumberOfVRegs() - in_vregs, in_vregs);
- HandleInvoke(invoke, operands, dex_file_->GetMethodShorty(method_idx), /* is_unresolved */ false);
+ HandleInvoke(invoke, operands, dex_file_->GetMethodShorty(method_idx), /* is_unresolved= */ false);
// Add the return instruction.
if (return_type_ == DataType::Type::kVoid) {
@@ -468,7 +468,7 @@
ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
ArenaBitVector* locations = ArenaBitVector::Create(local_allocator_,
code_item_accessor_.InsnsSizeInCodeUnits(),
- /* expandable */ false,
+ /* expandable= */ false,
kArenaAllocGraphBuilder);
locations->ClearAllBits();
// The visitor gets called when the line number changes.
@@ -559,7 +559,7 @@
uint16_t locals_index = graph_->GetNumberOfLocalVRegs();
uint16_t parameter_index = 0;
- const DexFile::MethodId& referrer_method_id =
+ const dex::MethodId& referrer_method_id =
dex_file_->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
if (!dex_compilation_unit_->IsStatic()) {
// Add the implicit 'this' argument, not expressed in the signature.
@@ -567,7 +567,7 @@
referrer_method_id.class_idx_,
parameter_index++,
DataType::Type::kReference,
- /* is_this */ true);
+ /* is_this= */ true);
AppendInstruction(parameter);
UpdateLocal(locals_index++, parameter);
number_of_parameters--;
@@ -576,15 +576,15 @@
DCHECK(current_this_parameter_ == nullptr);
}
- const DexFile::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
- const DexFile::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
+ const dex::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
+ const dex::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
for (int i = 0, shorty_pos = 1; i < number_of_parameters; i++) {
HParameterValue* parameter = new (allocator_) HParameterValue(
*dex_file_,
arg_types->GetTypeItem(shorty_pos - 1).type_idx_,
parameter_index++,
DataType::FromShorty(shorty[shorty_pos]),
- /* is_this */ false);
+ /* is_this= */ false);
++shorty_pos;
AppendInstruction(parameter);
// Store the parameter value in the local that the dex code will use
@@ -926,7 +926,7 @@
dex_pc,
method_idx,
invoke_type);
- return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ true);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ true);
}
// Replace calls to String.<init> with StringFactory.
@@ -945,10 +945,10 @@
HInvoke* invoke = new (allocator_) HInvokeStaticOrDirect(
allocator_,
number_of_arguments - 1,
- DataType::Type::kReference /*return_type */,
+ /* return_type= */ DataType::Type::kReference,
dex_pc,
method_idx,
- nullptr /* resolved_method */,
+ /* resolved_method= */ nullptr,
dispatch_info,
invoke_type,
target_method,
@@ -1010,7 +1010,7 @@
resolved_method,
ImTable::GetImtIndex(resolved_method));
}
- return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false, clinit_check);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false, clinit_check);
}
bool HInstructionBuilder::BuildInvokePolymorphic(uint32_t dex_pc,
@@ -1026,7 +1026,7 @@
return_type,
dex_pc,
method_idx);
- return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false);
}
@@ -1042,7 +1042,7 @@
call_site_idx,
return_type,
dex_pc);
- return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false);
}
HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) {
@@ -1370,7 +1370,7 @@
klass->GetDexFile(),
klass,
dex_pc,
- /* needs_access_check */ false);
+ /* needs_access_check= */ false);
if (cls != nullptr) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
clinit_check = new (allocator_) HClinitCheck(cls, dex_pc);
@@ -1515,7 +1515,7 @@
}
static DataType::Type GetFieldAccessType(const DexFile& dex_file, uint16_t field_index) {
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+ const dex::FieldId& field_id = dex_file.GetFieldId(field_index);
const char* type = dex_file.GetFieldTypeDescriptor(field_id);
return DataType::FromShorty(type[0]);
}
@@ -1539,7 +1539,7 @@
}
ScopedObjectAccess soa(Thread::Current());
- ArtField* resolved_field = ResolveField(field_index, /* is_static */ false, is_put);
+ ArtField* resolved_field = ResolveField(field_index, /* is_static= */ false, is_put);
// Generate an explicit null check on the reference, unless the field access
// is unresolved. In that case, we rely on the runtime to perform various
@@ -1673,7 +1673,7 @@
uint16_t field_index = instruction.VRegB_21c();
ScopedObjectAccess soa(Thread::Current());
- ArtField* resolved_field = ResolveField(field_index, /* is_static */ true, is_put);
+ ArtField* resolved_field = ResolveField(field_index, /* is_static= */ true, is_put);
if (resolved_field == nullptr) {
MaybeRecordStat(compilation_stats_,
@@ -1690,7 +1690,7 @@
klass->GetDexFile(),
klass,
dex_pc,
- /* needs_access_check */ false);
+ /* needs_access_check= */ false);
if (constant == nullptr) {
// The class cannot be referenced from this compiled code. Generate
@@ -2946,7 +2946,7 @@
case Instruction::IGET_CHAR_QUICK:
case Instruction::IGET_SHORT:
case Instruction::IGET_SHORT_QUICK: {
- if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put */ false, quicken_index)) {
+ if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put= */ false, quicken_index)) {
return false;
}
break;
@@ -2966,7 +2966,7 @@
case Instruction::IPUT_CHAR_QUICK:
case Instruction::IPUT_SHORT:
case Instruction::IPUT_SHORT_QUICK: {
- if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put */ true, quicken_index)) {
+ if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put= */ true, quicken_index)) {
return false;
}
break;
@@ -2979,7 +2979,7 @@
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT: {
- BuildStaticFieldAccess(instruction, dex_pc, /* is_put */ false);
+ BuildStaticFieldAccess(instruction, dex_pc, /* is_put= */ false);
break;
}
@@ -2990,7 +2990,7 @@
case Instruction::SPUT_BYTE:
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT: {
- BuildStaticFieldAccess(instruction, dex_pc, /* is_put */ true);
+ BuildStaticFieldAccess(instruction, dex_pc, /* is_put= */ true);
break;
}
@@ -3143,7 +3143,7 @@
ObjPtr<mirror::Class> HInstructionBuilder::LookupReferrerClass() const {
// TODO: Cache the result in a Handle<mirror::Class>.
- const DexFile::MethodId& method_id =
+ const dex::MethodId& method_id =
dex_compilation_unit_->GetDexFile()->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
return LookupResolvedType(method_id.class_idx_, *dex_compilation_unit_);
}
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 4c6d6ba..a433d7e 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -372,7 +372,7 @@
// (as defined by shift semantics). This ensures other
// optimizations do not need to special case for such situations.
DCHECK_EQ(shift_amount->GetType(), DataType::Type::kInt32);
- instruction->ReplaceInput(GetGraph()->GetIntConstant(masked_cst), /* index */ 1);
+ instruction->ReplaceInput(GetGraph()->GetIntConstant(masked_cst), /* index= */ 1);
RecordSimplification();
return;
}
@@ -2361,17 +2361,17 @@
ArenaAllocator* allocator = GetGraph()->GetAllocator();
// We treat String as an array to allow DCE and BCE to seamlessly work on strings,
// so create the HArrayLength, HBoundsCheck and HArrayGet.
- HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length */ true);
+ HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length= */ true);
invoke->GetBlock()->InsertInstructionBefore(length, invoke);
HBoundsCheck* bounds_check = new (allocator) HBoundsCheck(
- index, length, dex_pc, /* is_string_char_at */ true);
+ index, length, dex_pc, /* is_string_char_at= */ true);
invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke);
HArrayGet* array_get = new (allocator) HArrayGet(str,
bounds_check,
DataType::Type::kUint16,
SideEffects::None(), // Strings are immutable.
dex_pc,
- /* is_string_char_at */ true);
+ /* is_string_char_at= */ true);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get);
bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment());
GetGraph()->SetHasBoundsChecks(true);
@@ -2383,7 +2383,7 @@
// We treat String as an array to allow DCE and BCE to seamlessly work on strings,
// so create the HArrayLength.
HArrayLength* length =
- new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length */ true);
+ new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length= */ true);
HInstruction* replacement;
if (invoke->GetIntrinsic() == Intrinsics::kStringIsEmpty) {
// For String.isEmpty(), create the `HEqual` representing the `length == 0`.
@@ -2534,28 +2534,28 @@
SimplifySystemArrayCopy(instruction);
break;
case Intrinsics::kIntegerRotateRight:
- SimplifyRotate(instruction, /* is_left */ false, DataType::Type::kInt32);
+ SimplifyRotate(instruction, /* is_left= */ false, DataType::Type::kInt32);
break;
case Intrinsics::kLongRotateRight:
- SimplifyRotate(instruction, /* is_left */ false, DataType::Type::kInt64);
+ SimplifyRotate(instruction, /* is_left= */ false, DataType::Type::kInt64);
break;
case Intrinsics::kIntegerRotateLeft:
- SimplifyRotate(instruction, /* is_left */ true, DataType::Type::kInt32);
+ SimplifyRotate(instruction, /* is_left= */ true, DataType::Type::kInt32);
break;
case Intrinsics::kLongRotateLeft:
- SimplifyRotate(instruction, /* is_left */ true, DataType::Type::kInt64);
+ SimplifyRotate(instruction, /* is_left= */ true, DataType::Type::kInt64);
break;
case Intrinsics::kIntegerCompare:
- SimplifyCompare(instruction, /* is_signum */ false, DataType::Type::kInt32);
+ SimplifyCompare(instruction, /* is_signum= */ false, DataType::Type::kInt32);
break;
case Intrinsics::kLongCompare:
- SimplifyCompare(instruction, /* is_signum */ false, DataType::Type::kInt64);
+ SimplifyCompare(instruction, /* is_signum= */ false, DataType::Type::kInt64);
break;
case Intrinsics::kIntegerSignum:
- SimplifyCompare(instruction, /* is_signum */ true, DataType::Type::kInt32);
+ SimplifyCompare(instruction, /* is_signum= */ true, DataType::Type::kInt32);
break;
case Intrinsics::kLongSignum:
- SimplifyCompare(instruction, /* is_signum */ true, DataType::Type::kInt64);
+ SimplifyCompare(instruction, /* is_signum= */ true, DataType::Type::kInt64);
break;
case Intrinsics::kFloatIsNaN:
case Intrinsics::kDoubleIsNaN:
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index f968c19..01e9cff 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -43,11 +43,11 @@
bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
bool TryMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op, bool do_merge);
bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ false);
}
bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ true);
}
/**
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index b536cb4..e23decb 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -45,11 +45,11 @@
HInstruction* bitfield_op,
bool do_merge);
bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ false);
}
bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ true);
}
/**
diff --git a/compiler/optimizing/intrinsic_objects.cc b/compiler/optimizing/intrinsic_objects.cc
index 0374b4e..c345624 100644
--- a/compiler/optimizing/intrinsic_objects.cc
+++ b/compiler/optimizing/intrinsic_objects.cc
@@ -30,7 +30,7 @@
ClassLinker* class_linker)
REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::Class> integer_cache_class = class_linker->LookupClass(
- self, "Ljava/lang/Integer$IntegerCache;", /* class_linker */ nullptr);
+ self, "Ljava/lang/Integer$IntegerCache;", /* class_loader= */ nullptr);
if (integer_cache_class == nullptr || !integer_cache_class->IsInitialized()) {
return nullptr;
}
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 619cd8e..2721cb5 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -76,7 +76,7 @@
const char* descriptor)
REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::Class> klass =
- class_linker->LookupClass(self, descriptor, /* class_loader */ nullptr);
+ class_linker->LookupClass(self, descriptor, /* class_loader= */ nullptr);
DCHECK(klass != nullptr);
DCHECK(klass->IsInitialized());
return klass;
@@ -166,14 +166,14 @@
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
ObjPtr<mirror::Class> cache_class = class_linker->LookupClass(
- self, kIntegerCacheDescriptor, /* class_loader */ nullptr);
+ self, kIntegerCacheDescriptor, /* class_loader= */ nullptr);
DCHECK(cache_class != nullptr);
if (UNLIKELY(!cache_class->IsInitialized())) {
LOG(WARNING) << "Image class " << cache_class->PrettyDescriptor() << " is uninitialized.";
return;
}
ObjPtr<mirror::Class> integer_class =
- class_linker->LookupClass(self, kIntegerDescriptor, /* class_loader */ nullptr);
+ class_linker->LookupClass(self, kIntegerDescriptor, /* class_loader= */ nullptr);
DCHECK(integer_class != nullptr);
if (UNLIKELY(!integer_class->IsInitialized())) {
LOG(WARNING) << "Image class " << integer_class->PrettyDescriptor() << " is uninitialized.";
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 0b17c9d..ae1650e 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -272,10 +272,10 @@
}
void IntrinsicCodeGeneratorARM64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
}
void IntrinsicCodeGeneratorARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -286,10 +286,10 @@
}
void IntrinsicCodeGeneratorARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
}
void IntrinsicCodeGeneratorARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -618,7 +618,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) {
- GenMathRound(invoke, /* is_double */ true, GetVIXLAssembler());
+ GenMathRound(invoke, /* is_double= */ true, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -626,7 +626,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) {
- GenMathRound(invoke, /* is_double */ false, GetVIXLAssembler());
+ GenMathRound(invoke, /* is_double= */ false, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -752,13 +752,13 @@
trg_loc,
base,
MemOperand(temp.X()),
- /* needs_null_check */ false,
+ /* needs_null_check= */ false,
is_volatile);
} else {
// Other cases.
MemOperand mem_op(base.X(), offset);
if (is_volatile) {
- codegen->LoadAcquire(invoke, trg, mem_op, /* needs_null_check */ true);
+ codegen->LoadAcquire(invoke, trg, mem_op, /* needs_null_check= */ true);
} else {
codegen->Load(type, trg, mem_op);
}
@@ -813,22 +813,22 @@
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -896,7 +896,7 @@
}
if (is_volatile || is_ordered) {
- codegen->StoreRelease(invoke, type, source, mem_op, /* needs_null_check */ false);
+ codegen->StoreRelease(invoke, type, source, mem_op, /* needs_null_check= */ false);
} else {
codegen->Store(type, source, mem_op);
}
@@ -911,64 +911,64 @@
void IntrinsicCodeGeneratorARM64::VisitUnsafePut(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutOrdered(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutVolatile(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt32,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kReference,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLong(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt64,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1638,7 +1638,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero= */ true);
}
void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1654,7 +1654,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero= */ false);
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -2456,8 +2456,8 @@
src.W(),
class_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
// Bail out if the source is not a non primitive array.
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
@@ -2465,8 +2465,8 @@
temp1,
component_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
__ Cbz(temp1, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
// by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2482,8 +2482,8 @@
dest.W(),
class_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
// Bail out if the destination is not a non primitive array.
@@ -2499,8 +2499,8 @@
temp1,
component_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
__ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
// by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2518,8 +2518,8 @@
src.W(),
class_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
// Note: if heap poisoning is on, we are comparing two unpoisoned references here.
__ Cmp(temp1, temp2);
@@ -2532,8 +2532,8 @@
temp1,
component_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
// /* HeapReference<Class> */ temp1 = temp1->super_class_
// We do not need to emit a read barrier for the following
// heap reference load, as `temp1` is only used in a
@@ -2616,16 +2616,16 @@
src.W(),
class_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
// /* HeapReference<Class> */ temp2 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
temp2_loc,
temp1,
component_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
__ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
// by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2779,7 +2779,7 @@
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null */ false);
+ codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null= */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
@@ -2812,7 +2812,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) {
- GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+ GenIsInfinite(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -2820,7 +2820,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
- GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+ GenIsInfinite(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) {
@@ -2954,58 +2954,20 @@
__ Mvn(out, tmp);
}
-// The threshold for sizes of arrays to use the library provided implementation
-// of CRC32.updateBytes instead of the intrinsic.
-static constexpr int32_t kCRC32UpdateBytesThreshold = 64 * 1024;
-
-void IntrinsicLocationsBuilderARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
- if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
- return;
- }
-
- LocationSummary* locations
- = new (allocator_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
-
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetInAt(2, Location::RegisterOrConstant(invoke->InputAt(2)));
- locations->SetInAt(3, Location::RequiresRegister());
- locations->AddTemp(Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
-}
-
-// Lower the invoke of CRC32.updateBytes(int crc, byte[] b, int off, int len)
+// Generate code using CRC32 instructions which calculates
+// a CRC32 value of a byte.
//
-// Note: The intrinsic is not used if len exceeds a threshold.
-void IntrinsicCodeGeneratorARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
- DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
-
- auto masm = GetVIXLAssembler();
- auto locations = invoke->GetLocations();
-
- auto slow_path =
- new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
- codegen_->AddSlowPath(slow_path);
-
- Register length = WRegisterFrom(locations->InAt(3));
- __ Cmp(length, kCRC32UpdateBytesThreshold);
- __ B(slow_path->GetEntryLabel(), hi);
-
- const uint32_t array_data_offset =
- mirror::Array::DataOffset(Primitive::kPrimByte).Uint32Value();
- Register ptr = XRegisterFrom(locations->GetTemp(0));
- Register array = XRegisterFrom(locations->InAt(1));
- auto offset = locations->InAt(2);
- if (offset.IsConstant()) {
- int32_t offset_value = offset.GetConstant()->AsIntConstant()->GetValue();
- __ Add(ptr, array, array_data_offset + offset_value);
- } else {
- __ Add(ptr, array, array_data_offset);
- __ Add(ptr, ptr, XRegisterFrom(offset));
- }
-
+// Parameters:
+// masm - VIXL macro assembler
+// crc - a register holding an initial CRC value
+// ptr - a register holding a memory address of bytes
+// length - a register holding a number of bytes to process
+// out - a register to put a result of calculation
+static void GenerateCodeForCalculationCRC32ValueOfBytes(MacroAssembler* masm,
+ const Register& crc,
+ const Register& ptr,
+ const Register& length,
+ const Register& out) {
// The algorithm of CRC32 of bytes is:
// crc = ~crc
// process a few first bytes to make the array 8-byte aligned
@@ -3029,8 +2991,7 @@
Register len = temps.AcquireW();
Register array_elem = temps.AcquireW();
- Register out = WRegisterFrom(locations->Out());
- __ Mvn(out, WRegisterFrom(locations->InAt(0)));
+ __ Mvn(out, crc);
__ Mov(len, length);
__ Tbz(ptr, 0, &aligned2);
@@ -3095,10 +3056,111 @@
__ Bind(&done);
__ Mvn(out, out);
+}
+
+// The threshold for sizes of arrays to use the library provided implementation
+// of CRC32.updateBytes instead of the intrinsic.
+static constexpr int32_t kCRC32UpdateBytesThreshold = 64 * 1024;
+
+void IntrinsicLocationsBuilderARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
+ if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+ return;
+ }
+
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RegisterOrConstant(invoke->InputAt(2)));
+ locations->SetInAt(3, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+// Lower the invoke of CRC32.updateBytes(int crc, byte[] b, int off, int len)
+//
+// Note: The intrinsic is not used if len exceeds a threshold.
+void IntrinsicCodeGeneratorARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+ auto masm = GetVIXLAssembler();
+ auto locations = invoke->GetLocations();
+
+ auto slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ Register length = WRegisterFrom(locations->InAt(3));
+ __ Cmp(length, kCRC32UpdateBytesThreshold);
+ __ B(slow_path->GetEntryLabel(), hi);
+
+ const uint32_t array_data_offset =
+ mirror::Array::DataOffset(Primitive::kPrimByte).Uint32Value();
+ Register ptr = XRegisterFrom(locations->GetTemp(0));
+ Register array = XRegisterFrom(locations->InAt(1));
+ auto offset = locations->InAt(2);
+ if (offset.IsConstant()) {
+ int32_t offset_value = offset.GetConstant()->AsIntConstant()->GetValue();
+ __ Add(ptr, array, array_data_offset + offset_value);
+ } else {
+ __ Add(ptr, array, array_data_offset);
+ __ Add(ptr, ptr, XRegisterFrom(offset));
+ }
+
+ Register crc = WRegisterFrom(locations->InAt(0));
+ Register out = WRegisterFrom(locations->Out());
+
+ GenerateCodeForCalculationCRC32ValueOfBytes(masm, crc, ptr, length, out);
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderARM64::VisitCRC32UpdateByteBuffer(HInvoke* invoke) {
+ if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+ return;
+ }
+
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+// Lower the invoke of CRC32.updateByteBuffer(int crc, long addr, int off, int len)
+//
+// There is no need to generate code checking if addr is 0.
+// The method updateByteBuffer is a private method of java.util.zip.CRC32.
+// This guarantees no calls outside of the CRC32 class.
+// An address of DirectBuffer is always passed to the call of updateByteBuffer.
+// It might be an implementation of an empty DirectBuffer which can use a zero
+// address but it must have the length to be zero. The current generated code
+// correctly works with the zero length.
+void IntrinsicCodeGeneratorARM64::VisitCRC32UpdateByteBuffer(HInvoke* invoke) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+ auto masm = GetVIXLAssembler();
+ auto locations = invoke->GetLocations();
+
+ Register addr = XRegisterFrom(locations->InAt(1));
+ Register ptr = XRegisterFrom(locations->GetTemp(0));
+ __ Add(ptr, addr, XRegisterFrom(locations->InAt(2)));
+
+ Register crc = WRegisterFrom(locations->InAt(0));
+ Register length = WRegisterFrom(locations->InAt(3));
+ Register out = WRegisterFrom(locations->Out());
+ GenerateCodeForCalculationCRC32ValueOfBytes(masm, crc, ptr, length, out);
+}
+
UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf);
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 88f1457..396ff62 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -229,7 +229,7 @@
assembler->MaybePoisonHeapReference(tmp);
__ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex));
__ Cmp(src_curr_addr, src_stop_addr);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
__ B(GetExitLabel());
}
@@ -298,10 +298,10 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicCodeGeneratorARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicLocationsBuilderARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -312,10 +312,10 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
void IntrinsicCodeGeneratorARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -355,7 +355,7 @@
vixl32::Label end;
vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end);
__ Clz(out, in_reg_hi);
- __ CompareAndBranchIfNonZero(in_reg_hi, final_label, /* far_target */ false);
+ __ CompareAndBranchIfNonZero(in_reg_hi, final_label, /* is_far_target= */ false);
__ Clz(out, in_reg_lo);
__ Add(out, out, 32);
if (end.IsReferenced()) {
@@ -398,7 +398,7 @@
vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end);
__ Rbit(out, in_reg_lo);
__ Clz(out, out);
- __ CompareAndBranchIfNonZero(in_reg_lo, final_label, /* far_target */ false);
+ __ CompareAndBranchIfNonZero(in_reg_lo, final_label, /* is_far_target= */ false);
__ Rbit(out, in_reg_hi);
__ Clz(out, out);
__ Add(out, out, 32);
@@ -476,7 +476,7 @@
// For positive, zero or NaN inputs, rounding is done.
__ Cmp(out_reg, 0);
- __ B(ge, final_label, /* far_target */ false);
+ __ B(ge, final_label, /* is_far_target= */ false);
// Handle input < 0 cases.
// If input is negative but not a tie, previous result (round to nearest) is valid.
@@ -642,7 +642,7 @@
__ Add(RegisterFrom(temp), base, Operand(offset));
MemOperand src(RegisterFrom(temp), 0);
codegen->GenerateFieldLoadWithBakerReadBarrier(
- invoke, trg_loc, base, src, /* needs_null_check */ false);
+ invoke, trg_loc, base, src, /* needs_null_check= */ false);
if (is_volatile) {
__ Dmb(vixl32::ISH);
}
@@ -733,22 +733,22 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator,
@@ -778,39 +778,39 @@
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePut(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ true, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile= */ true, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ true, invoke);
}
static void GenUnsafePut(LocationSummary* locations,
@@ -844,7 +844,7 @@
__ Ldrexd(temp_lo, temp_hi, MemOperand(temp_reg));
__ Strexd(temp_lo, value_lo, value_hi, MemOperand(temp_reg));
__ Cmp(temp_lo, 0);
- __ B(ne, &loop_head, /* far_target */ false);
+ __ B(ne, &loop_head, /* is_far_target= */ false);
} else {
__ Strd(value_lo, value_hi, MemOperand(base, offset));
}
@@ -875,64 +875,64 @@
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePut(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1026,7 +1026,7 @@
__ Strex(tmp, value, MemOperand(tmp_ptr));
assembler->MaybeUnpoisonHeapReference(value);
__ Cmp(tmp, 0);
- __ B(ne, &loop_head, /* far_target */ false);
+ __ B(ne, &loop_head, /* is_far_target= */ false);
__ B(GetExitLabel());
}
};
@@ -1102,7 +1102,7 @@
assembler->MaybeUnpoisonHeapReference(value);
}
__ Cmp(tmp, 0);
- __ B(ne, &loop_head, /* far_target */ false);
+ __ B(ne, &loop_head, /* is_far_target= */ false);
__ Bind(loop_exit);
@@ -1113,7 +1113,7 @@
__ Lsr(out, out, WhichPowerOf2(out.GetSizeInBits()));
if (type == DataType::Type::kReference) {
- codegen->MaybeGenerateMarkingRegisterCheck(/* code */ 128);
+ codegen->MaybeGenerateMarkingRegisterCheck(/* code= */ 128);
}
}
@@ -1308,23 +1308,23 @@
__ Ldr(temp_reg, MemOperand(str, temp1));
__ Ldr(temp2, MemOperand(arg, temp1));
__ Cmp(temp_reg, temp2);
- __ B(ne, &find_char_diff, /* far_target */ false);
+ __ B(ne, &find_char_diff, /* is_far_target= */ false);
__ Add(temp1, temp1, char_size * 2);
__ Ldr(temp_reg, MemOperand(str, temp1));
__ Ldr(temp2, MemOperand(arg, temp1));
__ Cmp(temp_reg, temp2);
- __ B(ne, &find_char_diff_2nd_cmp, /* far_target */ false);
+ __ B(ne, &find_char_diff_2nd_cmp, /* is_far_target= */ false);
__ Add(temp1, temp1, char_size * 2);
// With string compression, we have compared 8 bytes, otherwise 4 chars.
__ Subs(temp0, temp0, (mirror::kUseStringCompression ? 8 : 4));
- __ B(hi, &loop, /* far_target */ false);
+ __ B(hi, &loop, /* is_far_target= */ false);
__ B(end);
__ Bind(&find_char_diff_2nd_cmp);
if (mirror::kUseStringCompression) {
__ Subs(temp0, temp0, 4); // 4 bytes previously compared.
- __ B(ls, end, /* far_target */ false); // Was the second comparison fully beyond the end?
+ __ B(ls, end, /* is_far_target= */ false); // Was the second comparison fully beyond the end?
} else {
// Without string compression, we can start treating temp0 as signed
// and rely on the signed comparison below.
@@ -1352,7 +1352,7 @@
// the remaining string data, so just return length diff (out).
// The comparison is unsigned for string compression, otherwise signed.
__ Cmp(temp0, Operand(temp1, vixl32::LSR, (mirror::kUseStringCompression ? 3 : 4)));
- __ B((mirror::kUseStringCompression ? ls : le), end, /* far_target */ false);
+ __ B((mirror::kUseStringCompression ? ls : le), end, /* is_far_target= */ false);
// Extract the characters and calculate the difference.
if (mirror::kUseStringCompression) {
@@ -1419,9 +1419,9 @@
__ Ldrb(temp_reg, MemOperand(temp1, c_char_size, PostIndex));
__ Ldrh(temp3, MemOperand(temp2, char_size, PostIndex));
__ Cmp(temp_reg, temp3);
- __ B(ne, &different_compression_diff, /* far_target */ false);
+ __ B(ne, &different_compression_diff, /* is_far_target= */ false);
__ Subs(temp0, temp0, 2);
- __ B(hi, &different_compression_loop, /* far_target */ false);
+ __ B(hi, &different_compression_loop, /* is_far_target= */ false);
__ B(end);
// Calculate the difference.
@@ -1517,12 +1517,12 @@
StringEqualsOptimizations optimizations(invoke);
if (!optimizations.GetArgumentNotNull()) {
// Check if input is null, return false if it is.
- __ CompareAndBranchIfZero(arg, &return_false, /* far_target */ false);
+ __ CompareAndBranchIfZero(arg, &return_false, /* is_far_target= */ false);
}
// Reference equality check, return true if same reference.
__ Cmp(str, arg);
- __ B(eq, &return_true, /* far_target */ false);
+ __ B(eq, &return_true, /* is_far_target= */ false);
if (!optimizations.GetArgumentIsString()) {
// Instanceof check for the argument by comparing class fields.
@@ -1532,7 +1532,7 @@
__ Ldr(temp, MemOperand(str, class_offset));
__ Ldr(out, MemOperand(arg, class_offset));
__ Cmp(temp, out);
- __ B(ne, &return_false, /* far_target */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
}
// Check if one of the inputs is a const string. Do not special-case both strings
@@ -1555,7 +1555,7 @@
// Also compares the compression style, if differs return false.
__ Ldr(temp, MemOperand(arg, count_offset));
__ Cmp(temp, Operand(mirror::String::GetFlaggedCount(const_string_length, is_compressed)));
- __ B(ne, &return_false, /* far_target */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
} else {
// Load `count` fields of this and argument strings.
__ Ldr(temp, MemOperand(str, count_offset));
@@ -1563,7 +1563,7 @@
// Check if `count` fields are equal, return false if they're not.
// Also compares the compression style, if differs return false.
__ Cmp(temp, out);
- __ B(ne, &return_false, /* far_target */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
}
// Assertions that must hold in order to compare strings 4 bytes at a time.
@@ -1586,9 +1586,9 @@
__ Ldrd(temp, temp1, MemOperand(str, offset));
__ Ldrd(temp2, out, MemOperand(arg, offset));
__ Cmp(temp, temp2);
- __ B(ne, &return_false, /* far_label */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
__ Cmp(temp1, out);
- __ B(ne, &return_false, /* far_label */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
offset += 2u * sizeof(uint32_t);
remaining_bytes -= 2u * sizeof(uint32_t);
}
@@ -1596,13 +1596,13 @@
__ Ldr(temp, MemOperand(str, offset));
__ Ldr(out, MemOperand(arg, offset));
__ Cmp(temp, out);
- __ B(ne, &return_false, /* far_label */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
}
} else {
// Return true if both strings are empty. Even with string compression `count == 0` means empty.
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
- __ CompareAndBranchIfZero(temp, &return_true, /* far_target */ false);
+ __ CompareAndBranchIfZero(temp, &return_true, /* is_far_target= */ false);
if (mirror::kUseStringCompression) {
// For string compression, calculate the number of bytes to compare (not chars).
@@ -1628,10 +1628,10 @@
__ Ldr(temp2, MemOperand(arg, temp1));
__ Add(temp1, temp1, Operand::From(sizeof(uint32_t)));
__ Cmp(out, temp2);
- __ B(ne, &return_false, /* far_target */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
// With string compression, we have compared 4 bytes, otherwise 2 chars.
__ Subs(temp, temp, mirror::kUseStringCompression ? 4 : 2);
- __ B(hi, &loop, /* far_target */ false);
+ __ B(hi, &loop, /* is_far_target= */ false);
}
// Return true and exit the function.
@@ -1712,7 +1712,7 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
- GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1728,7 +1728,7 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1950,7 +1950,7 @@
} else {
if (!optimizations.GetDestinationIsSource()) {
__ Cmp(src, dest);
- __ B(ne, &conditions_on_positions_validated, /* far_target */ false);
+ __ B(ne, &conditions_on_positions_validated, /* is_far_target= */ false);
}
__ Cmp(RegisterFrom(dest_pos), src_pos_constant);
__ B(gt, intrinsic_slow_path->GetEntryLabel());
@@ -1958,7 +1958,7 @@
} else {
if (!optimizations.GetDestinationIsSource()) {
__ Cmp(src, dest);
- __ B(ne, &conditions_on_positions_validated, /* far_target */ false);
+ __ B(ne, &conditions_on_positions_validated, /* is_far_target= */ false);
}
if (dest_pos.IsConstant()) {
int32_t dest_pos_constant = Int32ConstantFrom(dest_pos);
@@ -2018,11 +2018,11 @@
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false);
// Bail out if the source is not a non primitive array.
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
__ CompareAndBranchIfZero(temp1, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
// by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2034,7 +2034,7 @@
// /* HeapReference<Class> */ temp1 = dest->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check= */ false);
if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
// Bail out if the destination is not a non primitive array.
@@ -2046,7 +2046,7 @@
// temporaries such a `temp1`.
// /* HeapReference<Class> */ temp2 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check */ false);
+ invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check= */ false);
__ CompareAndBranchIfZero(temp2, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
// by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2060,16 +2060,16 @@
// read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
// /* HeapReference<Class> */ temp2 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check */ false);
+ invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check= */ false);
// Note: if heap poisoning is on, we are comparing two unpoisoned references here.
__ Cmp(temp1, temp2);
if (optimizations.GetDestinationIsTypedObjectArray()) {
vixl32::Label do_copy;
- __ B(eq, &do_copy, /* far_target */ false);
+ __ B(eq, &do_copy, /* is_far_target= */ false);
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
// /* HeapReference<Class> */ temp1 = temp1->super_class_
// We do not need to emit a read barrier for the following
// heap reference load, as `temp1` is only used in a
@@ -2126,7 +2126,7 @@
if (optimizations.GetDestinationIsTypedObjectArray()) {
vixl32::Label do_copy;
- __ B(eq, &do_copy, /* far_target */ false);
+ __ B(eq, &do_copy, /* is_far_target= */ false);
if (!did_unpoison) {
assembler->MaybeUnpoisonHeapReference(temp1);
}
@@ -2148,10 +2148,10 @@
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false);
// /* HeapReference<Class> */ temp3 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
__ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp3` has been unpoisoned
// by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2179,7 +2179,7 @@
if (length.IsRegister()) {
// Don't enter the copy loop if the length is null.
- __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target */ false);
+ __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target= */ false);
}
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
@@ -2256,7 +2256,7 @@
__ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
}
__ Cmp(temp1, temp3);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
__ Bind(read_barrier_slow_path->GetExitLabel());
} else {
@@ -2278,13 +2278,13 @@
__ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
}
__ Cmp(temp1, temp3);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
}
__ Bind(&done);
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(temp1, temp2, dest, NoReg, /* value_can_be_null */ false);
+ codegen_->MarkGCCard(temp1, temp2, dest, NoReg, /* can_be_null= */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
@@ -2814,7 +2814,7 @@
__ Subs(num_chr, srcEnd, srcBegin);
// Early out for valid zero-length retrievals.
- __ B(eq, final_label, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
// src range to copy.
__ Add(src_ptr, srcObj, value_offset);
@@ -2830,7 +2830,7 @@
__ Ldr(temp, MemOperand(srcObj, count_offset));
__ Tst(temp, 1);
temps.Release(temp);
- __ B(eq, &compressed_string_preloop, /* far_target */ false);
+ __ B(eq, &compressed_string_preloop, /* is_far_target= */ false);
}
__ Add(src_ptr, src_ptr, Operand(srcBegin, vixl32::LSL, 1));
@@ -2840,7 +2840,7 @@
temp = temps.Acquire();
// Save repairing the value of num_chr on the < 4 character path.
__ Subs(temp, num_chr, 4);
- __ B(lt, &remainder, /* far_target */ false);
+ __ B(lt, &remainder, /* is_far_target= */ false);
// Keep the result of the earlier subs, we are going to fetch at least 4 characters.
__ Mov(num_chr, temp);
@@ -2855,10 +2855,10 @@
__ Ldr(temp, MemOperand(src_ptr, char_size * 4, PostIndex));
__ Str(temp, MemOperand(dst_ptr, char_size * 4, PostIndex));
temps.Release(temp);
- __ B(ge, &loop, /* far_target */ false);
+ __ B(ge, &loop, /* is_far_target= */ false);
__ Adds(num_chr, num_chr, 4);
- __ B(eq, final_label, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
// Main loop for < 4 character case and remainder handling. Loads and stores one
// 16-bit Java character at a time.
@@ -2868,7 +2868,7 @@
__ Subs(num_chr, num_chr, 1);
__ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex));
temps.Release(temp);
- __ B(gt, &remainder, /* far_target */ false);
+ __ B(gt, &remainder, /* is_far_target= */ false);
if (mirror::kUseStringCompression) {
__ B(final_label);
@@ -2884,7 +2884,7 @@
__ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex));
temps.Release(temp);
__ Subs(num_chr, num_chr, 1);
- __ B(gt, &compressed_string_loop, /* far_target */ false);
+ __ B(gt, &compressed_string_loop, /* is_far_target= */ false);
}
if (done.IsReferenced()) {
@@ -3004,7 +3004,7 @@
__ Add(out, in, -info.low);
__ Cmp(out, info.length);
vixl32::Label allocate, done;
- __ B(hs, &allocate, /* is_far_target */ false);
+ __ B(hs, &allocate, /* is_far_target= */ false);
// If the value is within the bounds, load the j.l.Integer directly from the array.
codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_reference);
codegen_->LoadFromShiftedRegOffset(DataType::Type::kReference, locations->Out(), temp, out);
@@ -3037,7 +3037,7 @@
vixl32::Register temp = temps.Acquire();
vixl32::Label done;
vixl32::Label* const final_label = codegen_->GetFinalLabel(invoke, &done);
- __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
__ Dmb(vixl32::ISH);
__ Mov(temp, 0);
assembler->StoreToOffset(kStoreWord, temp, tr, offset);
@@ -3061,6 +3061,7 @@
UNIMPLEMENTED_INTRINSIC(ARMVIXL, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32Update)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32UpdateByteBuffer)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 08ba0a0..5b35974 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -185,7 +185,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// int java.lang.Float.floatToRawIntBits(float)
@@ -194,7 +194,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -226,7 +226,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// float java.lang.Float.intBitsToFloat(int)
@@ -235,7 +235,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator,
@@ -411,7 +411,7 @@
DataType::Type::kInt32,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ false,
+ /* reverseBits= */ false,
GetAssembler());
}
@@ -425,7 +425,7 @@
DataType::Type::kInt64,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ false,
+ /* reverseBits= */ false,
GetAssembler());
}
@@ -439,7 +439,7 @@
DataType::Type::kInt16,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ false,
+ /* reverseBits= */ false,
GetAssembler());
}
@@ -479,7 +479,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ false, IsR6(), GetAssembler());
}
// int java.lang.Long.numberOfLeadingZeros(long i)
@@ -488,7 +488,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ true, IsR6(), GetAssembler());
}
static void GenNumberOfTrailingZeroes(LocationSummary* locations,
@@ -566,7 +566,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler());
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ false, IsR6(), GetAssembler());
}
// int java.lang.Long.numberOfTrailingZeros(long i)
@@ -575,7 +575,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler());
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ true, IsR6(), GetAssembler());
}
// int java.lang.Integer.reverse(int)
@@ -588,7 +588,7 @@
DataType::Type::kInt32,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ true,
+ /* reverseBits= */ true,
GetAssembler());
}
@@ -602,7 +602,7 @@
DataType::Type::kInt64,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ true,
+ /* reverseBits= */ true,
GetAssembler());
}
@@ -1055,11 +1055,11 @@
codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
trg_loc,
base,
- /* offset */ 0U,
- /* index */ offset_loc,
+ /* offset= */ 0U,
+ /* index= */ offset_loc,
TIMES_1,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
if (is_volatile) {
__ Sync(0);
}
@@ -1077,8 +1077,8 @@
trg_loc,
trg_loc,
base_loc,
- /* offset */ 0U,
- /* index */ offset_loc);
+ /* offset= */ 0U,
+ /* index= */ offset_loc);
}
} else {
if (is_R6) {
@@ -1107,7 +1107,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, IsR6(), codegen_);
}
// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
@@ -1116,7 +1116,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, IsR6(), codegen_);
}
// long sun.misc.Unsafe.getLong(Object o, long offset)
@@ -1125,7 +1125,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, IsR6(), codegen_);
}
// Object sun.misc.Unsafe.getObject(Object o, long offset)
@@ -1134,7 +1134,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, IsR6(), codegen_);
}
// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
@@ -1143,7 +1143,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, IsR6(), codegen_);
}
static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -1225,8 +1225,8 @@
void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1239,8 +1239,8 @@
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
IsR6(),
codegen_);
}
@@ -1253,8 +1253,8 @@
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1267,8 +1267,8 @@
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1281,8 +1281,8 @@
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
IsR6(),
codegen_);
}
@@ -1295,8 +1295,8 @@
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1309,8 +1309,8 @@
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1323,8 +1323,8 @@
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
IsR6(),
codegen_);
}
@@ -1388,12 +1388,12 @@
invoke,
out_loc, // Unused, used only as a "temporary" within the read barrier.
base,
- /* offset */ 0u,
- /* index */ offset_loc,
+ /* offset= */ 0u,
+ /* index= */ offset_loc,
ScaleFactor::TIMES_1,
temp,
- /* needs_null_check */ false,
- /* always_update_field */ true);
+ /* needs_null_check= */ false,
+ /* always_update_field= */ true);
}
}
@@ -1706,7 +1706,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, /* start_at_zero */ true, GetAssembler(), codegen_);
+ GenerateStringIndexOf(invoke, /* start_at_zero= */ true, GetAssembler(), codegen_);
}
// int java.lang.String.indexOf(int ch, int fromIndex)
@@ -1727,7 +1727,7 @@
}
void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, /* start_at_zero */ false, GetAssembler(), codegen_);
+ GenerateStringIndexOf(invoke, /* start_at_zero= */ false, GetAssembler(), codegen_);
}
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -2698,6 +2698,7 @@
UNIMPLEMENTED_INTRINSIC(MIPS, CRC32Update)
UNIMPLEMENTED_INTRINSIC(MIPS, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(MIPS, CRC32UpdateByteBuffer)
UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 59d3ba2..afaa4ca 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -169,7 +169,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// int java.lang.Float.floatToRawIntBits(float)
@@ -178,7 +178,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -205,7 +205,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// float java.lang.Float.intBitsToFloat(int)
@@ -214,7 +214,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -295,7 +295,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
// int java.lang.Long.numberOfLeadingZeros(long i)
@@ -304,7 +304,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
static void GenNumberOfTrailingZeroes(LocationSummary* locations,
@@ -332,7 +332,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
// int java.lang.Long.numberOfTrailingZeros(long i)
@@ -341,7 +341,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
static void GenReverse(LocationSummary* locations,
@@ -911,11 +911,11 @@
codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
trg_loc,
base,
- /* offset */ 0U,
- /* index */ offset_loc,
+ /* offset= */ 0U,
+ /* index= */ offset_loc,
TIMES_1,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
if (is_volatile) {
__ Sync(0);
}
@@ -928,8 +928,8 @@
trg_loc,
trg_loc,
base_loc,
- /* offset */ 0U,
- /* index */ offset_loc);
+ /* offset= */ 0U,
+ /* index= */ offset_loc);
}
} else {
__ Lwu(trg, TMP, 0);
@@ -952,7 +952,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
@@ -961,7 +961,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
// long sun.misc.Unsafe.getLong(Object o, long offset)
@@ -970,7 +970,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
@@ -979,7 +979,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
// Object sun.misc.Unsafe.getObject(Object o, long offset)
@@ -988,7 +988,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
@@ -997,7 +997,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -1067,8 +1067,8 @@
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1080,8 +1080,8 @@
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
@@ -1093,8 +1093,8 @@
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1106,8 +1106,8 @@
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1119,8 +1119,8 @@
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
@@ -1132,8 +1132,8 @@
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1145,8 +1145,8 @@
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1158,8 +1158,8 @@
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
@@ -1171,8 +1171,8 @@
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1234,12 +1234,12 @@
invoke,
out_loc, // Unused, used only as a "temporary" within the read barrier.
base,
- /* offset */ 0u,
- /* index */ offset_loc,
+ /* offset= */ 0u,
+ /* index= */ offset_loc,
ScaleFactor::TIMES_1,
temp,
- /* needs_null_check */ false,
- /* always_update_field */ true);
+ /* needs_null_check= */ false,
+ /* always_update_field= */ true);
}
}
@@ -1548,7 +1548,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
}
// int java.lang.String.indexOf(int ch, int fromIndex)
@@ -1566,7 +1566,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
}
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -1667,7 +1667,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
- GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ GenIsInfinite(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
// boolean java.lang.Double.isInfinite(double)
@@ -1676,7 +1676,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
- GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ GenIsInfinite(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
@@ -2348,6 +2348,7 @@
UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32Update)
UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32UpdateByteBuffer)
UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 1d94950..8747f06 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -223,31 +223,31 @@
}
void IntrinsicLocationsBuilderX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(allocator_, invoke, /* is64bit */ true);
+ CreateFPToIntLocations(allocator_, invoke, /* is64bit= */ true);
}
void IntrinsicLocationsBuilderX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(allocator_, invoke, /* is64bit */ true);
+ CreateIntToFPLocations(allocator_, invoke, /* is64bit= */ true);
}
void IntrinsicCodeGeneratorX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicLocationsBuilderX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(allocator_, invoke, /* is64bit */ false);
+ CreateFPToIntLocations(allocator_, invoke, /* is64bit= */ false);
}
void IntrinsicLocationsBuilderX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(allocator_, invoke, /* is64bit */ false);
+ CreateIntToFPLocations(allocator_, invoke, /* is64bit= */ false);
}
void IntrinsicCodeGeneratorX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -1317,19 +1317,19 @@
}
void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ true);
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
}
void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ false);
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1691,7 +1691,7 @@
if (kUseBakerReadBarrier) {
Address src(base, offset, ScaleFactor::TIMES_1, 0);
codegen->GenerateReferenceLoadWithBakerReadBarrier(
- invoke, output_loc, base, src, /* needs_null_check */ false);
+ invoke, output_loc, base, src, /* needs_null_check= */ false);
} else {
__ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
codegen->GenerateReadBarrierSlow(
@@ -1762,45 +1762,45 @@
void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ false);
+ allocator_, invoke, DataType::Type::kInt32, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile= */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ false);
+ allocator_, invoke, DataType::Type::kInt64, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile= */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kReference, /* is_volatile */ false);
+ allocator_, invoke, DataType::Type::kReference, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kReference, /* is_volatile */ true);
+ allocator_, invoke, DataType::Type::kReference, /* is_volatile= */ true);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
@@ -1827,39 +1827,39 @@
void IntrinsicLocationsBuilderX86::VisitUnsafePut(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObject(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kReference, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLong(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ true);
}
// We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86
@@ -1911,34 +1911,34 @@
}
void IntrinsicCodeGeneratorX86::VisitUnsafePut(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLong(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
@@ -2035,8 +2035,8 @@
temp1_loc, // Unused, used only as a "temporary" within the read barrier.
base,
field_addr,
- /* needs_null_check */ false,
- /* always_update_field */ true,
+ /* needs_null_check= */ false,
+ /* always_update_field= */ true,
&temp2);
}
@@ -2267,19 +2267,19 @@
}
void IntrinsicLocationsBuilderX86::VisitIntegerBitCount(HInvoke* invoke) {
- CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ false);
+ CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long= */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerBitCount(HInvoke* invoke) {
- GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86::VisitLongBitCount(HInvoke* invoke) {
- CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ true);
+ CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long= */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongBitCount(HInvoke* invoke) {
- GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
@@ -2371,19 +2371,19 @@
}
void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ false);
+ CreateLeadingZeroLocations(allocator_, invoke, /* is_long= */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ true);
+ CreateLeadingZeroLocations(allocator_, invoke, /* is_long= */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
@@ -2462,19 +2462,19 @@
}
void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ false);
+ CreateTrailingZeroLocations(allocator_, invoke, /* is_long= */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ true);
+ CreateTrailingZeroLocations(allocator_, invoke, /* is_long= */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static bool IsSameInput(HInstruction* instruction, size_t input0, size_t input1) {
@@ -2682,11 +2682,11 @@
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
// Bail out if the source is not a non primitive array.
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(temp1, temp1);
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
@@ -2719,7 +2719,7 @@
// /* HeapReference<Class> */ temp1 = dest->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, dest, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false);
if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
// Bail out if the destination is not a non primitive array.
@@ -2731,7 +2731,7 @@
// temporaries such a `temp1`.
// /* HeapReference<Class> */ temp2 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp2_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(temp2, temp2);
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
@@ -2744,7 +2744,7 @@
// read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
// /* HeapReference<Class> */ temp2 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp2_loc, src, class_offset, /* needs_null_check= */ false);
// Note: if heap poisoning is on, we are comparing two unpoisoned references here.
__ cmpl(temp1, temp2);
@@ -2753,7 +2753,7 @@
__ j(kEqual, &do_copy);
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
// We do not need to emit a read barrier for the following
// heap reference load, as `temp1` is only used in a
// comparison with null below, and this reference is not
@@ -2807,10 +2807,10 @@
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(temp1, temp1);
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
@@ -2943,7 +2943,7 @@
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null */ false);
+ codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null= */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
@@ -3072,6 +3072,7 @@
UNIMPLEMENTED_INTRINSIC(X86, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(X86, CRC32Update)
UNIMPLEMENTED_INTRINSIC(X86, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(X86, CRC32UpdateByteBuffer)
UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 4f0b61d..167c1d8 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -162,10 +162,10 @@
}
void IntrinsicCodeGeneratorX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicCodeGeneratorX86_64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicLocationsBuilderX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -176,10 +176,10 @@
}
void IntrinsicCodeGeneratorX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
void IntrinsicCodeGeneratorX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -430,12 +430,12 @@
// direct x86 instruction, since NaN should map to 0 and large positive
// values need to be clipped to the extreme value.
codegen_->Load64BitValue(out, kPrimLongMax);
- __ cvtsi2sd(t2, out, /* is64bit */ true);
+ __ cvtsi2sd(t2, out, /* is64bit= */ true);
__ comisd(t1, t2);
__ j(kAboveEqual, &done); // clipped to max (already in out), does not jump on unordered
__ movl(out, Immediate(0)); // does not change flags, implicit zero extension to 64-bit
__ j(kUnordered, &done); // NaN mapped to 0 (just moved in out)
- __ cvttsd2si(out, t1, /* is64bit */ true);
+ __ cvttsd2si(out, t1, /* is64bit= */ true);
__ Bind(&done);
}
@@ -979,7 +979,7 @@
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = dest->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, dest, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false);
// Register `temp1` is not trashed by the read barrier emitted
// by GenerateFieldLoadWithBakerReadBarrier below, as that
// method produces a call to a ReadBarrierMarkRegX entry point,
@@ -987,7 +987,7 @@
// temporaries such a `temp1`.
// /* HeapReference<Class> */ temp2 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp2_loc, src, class_offset, /* needs_null_check= */ false);
// If heap poisoning is enabled, `temp1` and `temp2` have been
// unpoisoned by the the previous calls to
// GenerateFieldLoadWithBakerReadBarrier.
@@ -1011,7 +1011,7 @@
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ TMP = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, TMP_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(CpuRegister(TMP), CpuRegister(TMP));
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `TMP` has been unpoisoned by
@@ -1034,7 +1034,7 @@
// read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
// /* HeapReference<Class> */ TMP = temp2->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, TMP_loc, temp2, component_offset, /* needs_null_check */ false);
+ invoke, TMP_loc, temp2, component_offset, /* needs_null_check= */ false);
__ testl(CpuRegister(TMP), CpuRegister(TMP));
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `TMP` has been unpoisoned by
@@ -1058,7 +1058,7 @@
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
// We do not need to emit a read barrier for the following
// heap reference load, as `temp1` is only used in a
// comparison with null below, and this reference is not
@@ -1086,10 +1086,10 @@
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
// /* HeapReference<Class> */ TMP = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, TMP_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(CpuRegister(TMP), CpuRegister(TMP));
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
} else {
@@ -1198,7 +1198,7 @@
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(temp1, temp2, dest, CpuRegister(kNoRegister), /* value_can_be_null */ false);
+ codegen_->MarkGCCard(temp1, temp2, dest, CpuRegister(kNoRegister), /* value_can_be_null= */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
@@ -1444,7 +1444,7 @@
// Ensure we have a start index >= 0;
__ xorl(counter, counter);
__ cmpl(start_index, Immediate(0));
- __ cmov(kGreater, counter, start_index, /* is64bit */ false); // 32-bit copy is enough.
+ __ cmov(kGreater, counter, start_index, /* is64bit= */ false); // 32-bit copy is enough.
if (mirror::kUseStringCompression) {
NearLabel modify_counter, offset_uncompressed_label;
@@ -1506,19 +1506,19 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringIndexOf(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
}
void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1832,7 +1832,7 @@
void IntrinsicCodeGeneratorX86_64::VisitThreadCurrentThread(HInvoke* invoke) {
CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
GetAssembler()->gs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86_64PointerSize>(),
- /* no_rip */ true));
+ /* no_rip= */ true));
}
static void GenUnsafeGet(HInvoke* invoke,
@@ -1858,7 +1858,7 @@
if (kUseBakerReadBarrier) {
Address src(base, offset, ScaleFactor::TIMES_1, 0);
codegen->GenerateReferenceLoadWithBakerReadBarrier(
- invoke, output_loc, base, src, /* needs_null_check */ false);
+ invoke, output_loc, base, src, /* needs_null_check= */ false);
} else {
__ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
codegen->GenerateReadBarrierSlow(
@@ -1922,22 +1922,22 @@
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
@@ -2020,34 +2020,34 @@
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePut(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLong(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
@@ -2132,8 +2132,8 @@
out_loc, // Unused, used only as a "temporary" within the read barrier.
base,
field_addr,
- /* needs_null_check */ false,
- /* always_update_field */ true,
+ /* needs_null_check= */ false,
+ /* always_update_field= */ true,
&temp1,
&temp2);
}
@@ -2361,7 +2361,7 @@
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerBitCount(HInvoke* invoke) {
- GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongBitCount(HInvoke* invoke) {
@@ -2369,7 +2369,7 @@
}
void IntrinsicCodeGeneratorX86_64::VisitLongBitCount(HInvoke* invoke) {
- GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static void CreateOneBitLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_high) {
@@ -2476,35 +2476,35 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
+ CreateOneBitLocations(allocator_, invoke, /* is_high= */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ true, /* is_long */ false);
+ GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ true, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
+ CreateOneBitLocations(allocator_, invoke, /* is_high= */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
- GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ true, /* is_long */ true);
+ GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ true, /* is_long= */ true);
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
+ CreateOneBitLocations(allocator_, invoke, /* is_high= */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ false);
+ GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ false, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
+ CreateOneBitLocations(allocator_, invoke, /* is_high= */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
- GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ true);
+ GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ false, /* is_long= */ true);
}
static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -2569,7 +2569,7 @@
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -2577,7 +2577,7 @@
}
void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -2637,7 +2637,7 @@
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -2645,7 +2645,7 @@
}
void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
@@ -2716,7 +2716,7 @@
X86_64Assembler* assembler = GetAssembler();
CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
Address address = Address::Absolute
- (Thread::InterruptedOffset<kX86_64PointerSize>().Int32Value(), /* no_rip */ true);
+ (Thread::InterruptedOffset<kX86_64PointerSize>().Int32Value(), /* no_rip= */ true);
NearLabel done;
__ gs()->movl(out, address);
__ testl(out, out);
@@ -2739,6 +2739,7 @@
UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, CRC32Update)
UNIMPLEMENTED_INTRINSIC(X86_64, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(X86_64, CRC32UpdateByteBuffer)
UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index c7cc661..310d98b 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -30,7 +30,7 @@
: graph_(CreateGraph()),
iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)),
loop_opt_(new (GetAllocator()) HLoopOptimization(
- graph_, /* compiler_options */ nullptr, iva_, /* stats */ nullptr)) {
+ graph_, /* compiler_options= */ nullptr, iva_, /* stats= */ nullptr)) {
BuildGraph();
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index d1fba31..f7c16d1 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -44,7 +44,7 @@
// Create the inexact Object reference type and store it in the HGraph.
inexact_object_rti_ = ReferenceTypeInfo::Create(
handles->NewHandle(GetClassRoot<mirror::Object>()),
- /* is_exact */ false);
+ /* is_exact= */ false);
}
void HGraph::AddBlock(HBasicBlock* block) {
@@ -60,7 +60,7 @@
ScopedArenaAllocator allocator(GetArenaStack());
// Nodes that we're currently visiting, indexed by block id.
ArenaBitVector visiting(
- &allocator, blocks_.size(), /* expandable */ false, kArenaAllocGraphBuilder);
+ &allocator, blocks_.size(), /* expandable= */ false, kArenaAllocGraphBuilder);
visiting.ClearAllBits();
// Number of successors visited from a given node, indexed by block id.
ScopedArenaVector<size_t> successors_visited(blocks_.size(),
@@ -689,7 +689,7 @@
}
const char* HGraph::GetMethodName() const {
- const DexFile::MethodId& method_id = dex_file_.GetMethodId(method_idx_);
+ const dex::MethodId& method_id = dex_file_.GetMethodId(method_idx_);
return dex_file_.GetMethodName(method_id);
}
@@ -826,7 +826,7 @@
ScopedArenaAllocator allocator(graph->GetArenaStack());
ArenaBitVector visited(&allocator,
graph->GetBlocks().size(),
- /* expandable */ false,
+ /* expandable= */ false,
kArenaAllocGraphBuilder);
visited.ClearAllBits();
// Stop marking blocks at the loop header.
@@ -2527,7 +2527,7 @@
current->SetGraph(outer_graph);
outer_graph->AddBlock(current);
outer_graph->reverse_post_order_[++index_of_at] = current;
- UpdateLoopAndTryInformationOfNewBlock(current, at, /* replace_if_back_edge */ false);
+ UpdateLoopAndTryInformationOfNewBlock(current, at, /* replace_if_back_edge= */ false);
}
}
@@ -2537,7 +2537,7 @@
outer_graph->reverse_post_order_[++index_of_at] = to;
// Only `to` can become a back edge, as the inlined blocks
// are predecessors of `to`.
- UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge */ true);
+ UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge= */ true);
// Update all predecessors of the exit block (now the `to` block)
// to not `HReturn` but `HGoto` instead. Special case throwing blocks
@@ -2711,13 +2711,13 @@
DCHECK((old_pre_header->GetLoopInformation() == nullptr) ||
!old_pre_header->GetLoopInformation()->IsBackEdge(*old_pre_header));
UpdateLoopAndTryInformationOfNewBlock(
- if_block, old_pre_header, /* replace_if_back_edge */ false);
+ if_block, old_pre_header, /* replace_if_back_edge= */ false);
UpdateLoopAndTryInformationOfNewBlock(
- true_block, old_pre_header, /* replace_if_back_edge */ false);
+ true_block, old_pre_header, /* replace_if_back_edge= */ false);
UpdateLoopAndTryInformationOfNewBlock(
- false_block, old_pre_header, /* replace_if_back_edge */ false);
+ false_block, old_pre_header, /* replace_if_back_edge= */ false);
UpdateLoopAndTryInformationOfNewBlock(
- new_pre_header, old_pre_header, /* replace_if_back_edge */ false);
+ new_pre_header, old_pre_header, /* replace_if_back_edge= */ false);
}
HBasicBlock* HGraph::TransformLoopForVectorization(HBasicBlock* header,
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 13c8684..686a2de 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -3242,7 +3242,7 @@
SideEffects::All(),
dex_pc,
allocator,
- /* number_of_inputs */ 1,
+ /* number_of_inputs= */ 1,
kArenaAllocMisc) {
SetPackedFlag<kFieldCanBeMoved>(false);
SetPackedField<DeoptimizeKindField>(kind);
@@ -3267,7 +3267,7 @@
SideEffects::CanTriggerGC(),
dex_pc,
allocator,
- /* number_of_inputs */ 2,
+ /* number_of_inputs= */ 2,
kArenaAllocMisc) {
SetPackedFlag<kFieldCanBeMoved>(true);
SetPackedField<DeoptimizeKindField>(kind);
@@ -4399,7 +4399,7 @@
: HInvoke(kInvokeUnresolved,
allocator,
number_of_arguments,
- 0u /* number_of_other_inputs */,
+ /* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
dex_method_index,
@@ -4425,7 +4425,7 @@
: HInvoke(kInvokePolymorphic,
allocator,
number_of_arguments,
- 0u /* number_of_other_inputs */,
+ /* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
dex_method_index,
@@ -4451,11 +4451,11 @@
: HInvoke(kInvokeCustom,
allocator,
number_of_arguments,
- /* number_of_other_inputs */ 0u,
+ /* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
- /* dex_method_index */ dex::kDexNoIndex,
- /* resolved_method */ nullptr,
+ /* dex_method_index= */ dex::kDexNoIndex,
+ /* resolved_method= */ nullptr,
kStatic),
call_site_index_(call_site_index) {
}
@@ -5894,7 +5894,7 @@
type,
SideEffects::ArrayReadOfType(type),
dex_pc,
- /* is_string_char_at */ false) {
+ /* is_string_char_at= */ false) {
}
HArrayGet(HInstruction* array,
@@ -6336,7 +6336,7 @@
ReferenceTypeInfo GetLoadedClassRTI() {
if (GetPackedFlag<kFlagValidLoadedClassRTI>()) {
// Note: The is_exact flag from the return value should not be used.
- return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact */ true);
+ return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact= */ true);
} else {
return ReferenceTypeInfo::CreateInvalid();
}
@@ -7089,7 +7089,7 @@
side_effects,
dex_pc,
allocator,
- /* number_of_inputs */ check_kind == TypeCheckKind::kBitstringCheck ? 4u : 2u,
+ /* number_of_inputs= */ check_kind == TypeCheckKind::kBitstringCheck ? 4u : 2u,
kArenaAllocTypeCheckInputs),
klass_(klass) {
SetPackedField<TypeCheckKindField>(check_kind);
@@ -7145,7 +7145,7 @@
ReferenceTypeInfo GetTargetClassRTI() {
if (GetPackedFlag<kFlagValidTargetClassRTI>()) {
// Note: The is_exact flag from the return value should not be used.
- return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact */ true);
+ return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact= */ true);
} else {
return ReferenceTypeInfo::CreateInvalid();
}
@@ -7458,7 +7458,7 @@
SideEffects::AllReads(),
dex_pc,
allocator,
- /* number_of_inputs */ 1,
+ /* number_of_inputs= */ 1,
kArenaAllocConstructorFenceInputs) {
DCHECK(fence_object != nullptr);
SetRawInputAt(0, fence_object);
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index cd4f45e..efe4d6b 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -207,7 +207,7 @@
allocator,
packed_type,
SideEffects::None(),
- /* number_of_inputs */ 1,
+ /* number_of_inputs= */ 1,
vector_length,
dex_pc) {
SetRawInputAt(0, input);
@@ -235,7 +235,7 @@
allocator,
packed_type,
SideEffects::None(),
- /* number_of_inputs */ 2,
+ /* number_of_inputs= */ 2,
vector_length,
dex_pc) {
SetRawInputAt(0, left);
@@ -948,7 +948,7 @@
allocator,
packed_type,
SideEffects::None(),
- /* number_of_inputs */ 3,
+ /* number_of_inputs= */ 3,
vector_length,
dex_pc),
op_kind_(op) {
@@ -1002,7 +1002,7 @@
allocator,
packed_type,
SideEffects::None(),
- /* number_of_inputs */ 3,
+ /* number_of_inputs= */ 3,
vector_length,
dex_pc) {
DCHECK(HasConsistentPackedTypes(accumulator, packed_type));
@@ -1049,7 +1049,7 @@
allocator,
packed_type,
SideEffects::None(),
- /* number_of_inputs */ 3,
+ /* number_of_inputs= */ 3,
vector_length,
dex_pc) {
DCHECK(HasConsistentPackedTypes(accumulator, packed_type));
@@ -1097,7 +1097,7 @@
allocator,
packed_type,
side_effects,
- /* number_of_inputs */ 2,
+ /* number_of_inputs= */ 2,
vector_length,
dex_pc) {
SetRawInputAt(0, base);
@@ -1143,7 +1143,7 @@
allocator,
packed_type,
side_effects,
- /* number_of_inputs */ 3,
+ /* number_of_inputs= */ 3,
vector_length,
dex_pc) {
DCHECK(HasConsistentPackedTypes(value, packed_type));
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index b75afad..8864a12 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -260,9 +260,9 @@
handles,
stats,
accessor.RegistersSize(),
- /* total_number_of_instructions */ 0,
- /* parent */ nullptr,
- /* depth */ 0,
+ /* total_number_of_instructions= */ 0,
+ /* parent= */ nullptr,
+ /* depth= */ 0,
pass_name);
break;
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 4936a6d..3b34e8d 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -162,7 +162,7 @@
VLOG(compiler) << "Starting pass: " << pass_name;
// Dump graph first, then start timer.
if (visualizer_enabled_) {
- visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_);
+ visualizer_.DumpGraph(pass_name, /* is_after_pass= */ false, graph_in_bad_state_);
FlushVisualizer();
}
if (timing_logger_enabled_) {
@@ -184,7 +184,7 @@
timing_logger_.EndTiming();
}
if (visualizer_enabled_) {
- visualizer_.DumpGraph(pass_name, /* is_after_pass */ true, graph_in_bad_state_);
+ visualizer_.DumpGraph(pass_name, /* is_after_pass= */ true, graph_in_bad_state_);
FlushVisualizer();
}
@@ -272,7 +272,7 @@
bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override;
- CompiledMethod* Compile(const DexFile::CodeItem* code_item,
+ CompiledMethod* Compile(const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -370,7 +370,7 @@
CompiledMethod* Emit(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
CodeGenerator* codegen,
- const DexFile::CodeItem* item) const;
+ const dex::CodeItem* item) const;
// Try compiling a method and return the code generator used for
// compiling it.
@@ -760,7 +760,7 @@
CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
CodeGenerator* codegen,
- const DexFile::CodeItem* code_item_for_osr_check) const {
+ const dex::CodeItem* code_item_for_osr_check) const {
ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item_for_osr_check);
@@ -799,7 +799,7 @@
InstructionSet instruction_set = compiler_options.GetInstructionSet();
const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
- const DexFile::CodeItem* code_item = dex_compilation_unit.GetCodeItem();
+ const dex::CodeItem* code_item = dex_compilation_unit.GetCodeItem();
// Always use the Thumb-2 assembler: some runtime functionality
// (like implicit stack overflow checks) assume Thumb-2.
@@ -964,7 +964,7 @@
compiler_options.GetInstructionSet(),
kInvalidInvokeType,
compiler_options.GetDebuggable(),
- /* osr */ false);
+ /* osr= */ false);
DCHECK(Runtime::Current()->IsAotCompiler());
DCHECK(method != nullptr);
@@ -994,7 +994,7 @@
&dex_compilation_unit,
codegen.get(),
compilation_stats_.get(),
- /* interpreter_metadata */ ArrayRef<const uint8_t>(),
+ /* interpreter_metadata= */ ArrayRef<const uint8_t>(),
handles);
builder.BuildIntrinsicGraph(method);
}
@@ -1033,7 +1033,7 @@
return codegen.release();
}
-CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
+CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -1161,7 +1161,7 @@
jni_compiled_method.GetFrameSize(),
jni_compiled_method.GetCoreSpillMask(),
jni_compiled_method.GetFpSpillMask(),
- /* num_dex_registers */ 0);
+ /* num_dex_registers= */ 0);
stack_map_stream->EndMethod();
return stack_map_stream->Encode();
}
@@ -1208,7 +1208,7 @@
CompiledMethod* compiled_method = Emit(&allocator,
&code_allocator,
codegen.get(),
- /* code_item_for_osr_check */ nullptr);
+ /* item= */ nullptr);
compiled_method->MarkAsIntrinsic();
return compiled_method;
}
@@ -1228,7 +1228,7 @@
jni_compiled_method.GetCode(),
ArrayRef<const uint8_t>(stack_map),
jni_compiled_method.GetCfi(),
- /* patches */ ArrayRef<const linker::LinkerPatch>());
+ /* patches= */ ArrayRef<const linker::LinkerPatch>());
}
Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
@@ -1254,7 +1254,7 @@
const DexFile* dex_file = method->GetDexFile();
const uint16_t class_def_idx = method->GetClassDefIndex();
- const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
+ const dex::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
const uint32_t method_idx = method->GetDexMethodIndex();
const uint32_t access_flags = method->GetAccessFlags();
@@ -1277,7 +1277,7 @@
uint8_t* roots_data = nullptr;
uint32_t data_size = code_cache->ReserveData(self,
stack_map.size(),
- /* number_of_roots */ 0,
+ /* number_of_roots= */ 0,
method,
&stack_map_data,
&roots_data);
@@ -1297,7 +1297,7 @@
data_size,
osr,
roots,
- /* has_should_deoptimize_flag */ false,
+ /* has_should_deoptimize_flag= */ false,
cha_single_implementation_list);
if (code == nullptr) {
return false;
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index f903f82..e5f6941 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -155,7 +155,7 @@
void* aligned_data = GetAllocator()->Alloc(code_item_size);
memcpy(aligned_data, &data[0], code_item_size);
CHECK_ALIGNED(aligned_data, StandardDexFile::CodeItem::kAlignment);
- const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(aligned_data);
+ const dex::CodeItem* code_item = reinterpret_cast<const dex::CodeItem*>(aligned_data);
{
ScopedObjectAccess soa(Thread::Current());
@@ -165,13 +165,13 @@
const DexCompilationUnit* dex_compilation_unit =
new (graph->GetAllocator()) DexCompilationUnit(
handles_->NewHandle<mirror::ClassLoader>(nullptr),
- /* class_linker */ nullptr,
+ /* class_linker= */ nullptr,
graph->GetDexFile(),
code_item,
- /* class_def_index */ DexFile::kDexNoIndex16,
- /* method_idx */ dex::kDexNoIndex,
- /* access_flags */ 0u,
- /* verified_method */ nullptr,
+ /* class_def_index= */ DexFile::kDexNoIndex16,
+ /* method_idx= */ dex::kDexNoIndex,
+ /* access_flags= */ 0u,
+ /* verified_method= */ nullptr,
handles_->NewHandle<mirror::DexCache>(nullptr));
CodeItemDebugInfoAccessor accessor(graph->GetDexFile(), code_item, /*dex_method_idx*/ 0u);
HGraphBuilder builder(graph, dex_compilation_unit, accessor, handles_.get(), return_type);
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index 12db8a0..fbdbf9d 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -87,9 +87,9 @@
if (GetGraph()->GetArtMethod() != char_at_method) {
ArenaAllocator* allocator = GetGraph()->GetAllocator();
HEnvironment* environment = new (allocator) HEnvironment(allocator,
- /* number_of_vregs */ 0u,
+ /* number_of_vregs= */ 0u,
char_at_method,
- /* dex_pc */ dex::kDexNoIndex,
+ /* dex_pc= */ dex::kDexNoIndex,
check);
check->InsertRawEnvironment(environment);
}
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 9079658..61e7a60 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -278,7 +278,7 @@
if (ShouldCreateBoundType(
insert_point, receiver, class_rti, start_instruction, start_block)) {
bound_type = new (receiver->GetBlock()->GetGraph()->GetAllocator()) HBoundType(receiver);
- bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false);
+ bound_type->SetUpperBound(class_rti, /* can_be_null= */ false);
start_block->InsertInstructionBefore(bound_type, insert_point);
// To comply with the RTP algorithm, don't type the bound type just yet, it will
// be handled in RTPVisitor::VisitBoundType.
@@ -350,7 +350,7 @@
HBasicBlock* trueBlock = compare->IsEqual()
? check->AsIf()->IfTrueSuccessor()
: check->AsIf()->IfFalseSuccessor();
- BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti);
+ BoundTypeIn(receiver, trueBlock, /* start_instruction= */ nullptr, class_rti);
} else {
DCHECK(check->IsDeoptimize());
if (compare->IsEqual() && check->AsDeoptimize()->GuardsAnInput()) {
@@ -427,9 +427,9 @@
: ifInstruction->IfFalseSuccessor();
ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
- handle_cache_->GetObjectClassHandle(), /* is_exact */ false);
+ handle_cache_->GetObjectClassHandle(), /* is_exact= */ false);
- BoundTypeIn(obj, notNullBlock, /* start_instruction */ nullptr, object_rti);
+ BoundTypeIn(obj, notNullBlock, /* start_instruction= */ nullptr, object_rti);
}
// Returns true if one of the patterns below has been recognized. If so, the
@@ -538,10 +538,10 @@
{
ScopedObjectAccess soa(Thread::Current());
if (!class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
- class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false);
+ class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact= */ false);
}
}
- BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction */ nullptr, class_rti);
+ BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction= */ nullptr, class_rti);
}
void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
@@ -561,7 +561,7 @@
// Use a null loader, the target method is in a boot classpath dex file.
Handle<mirror::ClassLoader> loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
ArtMethod* method = cl->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
- dex_method_index, dex_cache, loader, /* referrer */ nullptr, kDirect);
+ dex_method_index, dex_cache, loader, /* referrer= */ nullptr, kDirect);
DCHECK(method != nullptr);
ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
DCHECK(declaring_class != nullptr);
@@ -571,7 +571,7 @@
<< "Expected String.<init>: " << method->PrettyMethod();
}
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true));
+ ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact= */ true));
} else if (IsAdmissible(klass)) {
ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass);
is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes();
@@ -600,12 +600,12 @@
void ReferenceTypePropagation::RTPVisitor::VisitNewInstance(HNewInstance* instr) {
ScopedObjectAccess soa(Thread::Current());
- SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
+ SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact= */ true);
}
void ReferenceTypePropagation::RTPVisitor::VisitNewArray(HNewArray* instr) {
ScopedObjectAccess soa(Thread::Current());
- SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
+ SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact= */ true);
}
void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue* instr) {
@@ -614,7 +614,7 @@
UpdateReferenceTypeInfo(instr,
instr->GetTypeIndex(),
instr->GetDexFile(),
- /* is_exact */ false);
+ /* is_exact= */ false);
}
}
@@ -632,7 +632,7 @@
klass = info.GetField()->LookupResolvedType();
}
- SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+ SetClassAsTypeInfo(instr, klass, /* is_exact= */ false);
}
void ReferenceTypePropagation::RTPVisitor::VisitInstanceFieldGet(HInstanceFieldGet* instr) {
@@ -665,7 +665,7 @@
instr->SetValidLoadedClassRTI();
}
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact */ true));
+ ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact= */ true));
}
void ReferenceTypePropagation::RTPVisitor::VisitInstanceOf(HInstanceOf* instr) {
@@ -682,17 +682,17 @@
void ReferenceTypePropagation::RTPVisitor::VisitLoadMethodHandle(HLoadMethodHandle* instr) {
instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
handle_cache_->GetMethodHandleClassHandle(),
- /* is_exact */ true));
+ /* is_exact= */ true));
}
void ReferenceTypePropagation::RTPVisitor::VisitLoadMethodType(HLoadMethodType* instr) {
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetMethodTypeClassHandle(), /* is_exact */ true));
+ ReferenceTypeInfo::Create(handle_cache_->GetMethodTypeClassHandle(), /* is_exact= */ true));
}
void ReferenceTypePropagation::RTPVisitor::VisitLoadString(HLoadString* instr) {
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true));
+ ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact= */ true));
}
void ReferenceTypePropagation::RTPVisitor::VisitLoadException(HLoadException* instr) {
@@ -701,12 +701,12 @@
if (catch_info->IsCatchAllTypeIndex()) {
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetThrowableClassHandle(), /* is_exact */ false));
+ ReferenceTypeInfo::Create(handle_cache_->GetThrowableClassHandle(), /* is_exact= */ false));
} else {
UpdateReferenceTypeInfo(instr,
catch_info->GetCatchTypeIndex(),
catch_info->GetCatchDexFile(),
- /* is_exact */ false);
+ /* is_exact= */ false);
}
}
@@ -736,7 +736,7 @@
// bound type is dead. To not confuse potential other optimizations, we mark
// the bound as non-exact.
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
+ ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact= */ false));
}
} else {
// Object not typed yet. Leave BoundType untyped for now rather than
@@ -914,7 +914,7 @@
ScopedObjectAccess soa(Thread::Current());
ArtMethod* method = instr->GetResolvedMethod();
ObjPtr<mirror::Class> klass = (method == nullptr) ? nullptr : method->LookupResolvedReturnType();
- SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+ SetClassAsTypeInfo(instr, klass, /* is_exact= */ false);
}
void ReferenceTypePropagation::RTPVisitor::VisitArrayGet(HArrayGet* instr) {
@@ -947,7 +947,7 @@
// bound type is dead. To not confuse potential other optimizations, we mark
// the bound as non-exact.
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), /* is_exact */ false));
+ ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), /* is_exact= */ false));
}
}
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 27f9ac3..b1f0a1a 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -280,16 +280,16 @@
LocationSummary* locations = instruction->GetLocations();
if (locations->OnlyCallsOnSlowPath()) {
size_t core_spills =
- codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true);
+ codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ true);
size_t fp_spills =
- codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false);
+ codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ false);
size_t spill_size =
core_register_spill_size * core_spills + fp_register_spill_size * fp_spills;
maximum_safepoint_spill_size = std::max(maximum_safepoint_spill_size, spill_size);
} else if (locations->CallsOnMainAndSlowPath()) {
// Nothing to spill on the slow path if the main path already clobbers caller-saves.
- DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true));
- DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false));
+ DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ true));
+ DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ false));
}
}
return maximum_safepoint_spill_size;
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 1e00003..0d6c5a3 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -252,7 +252,7 @@
temp_intervals_.push_back(interval);
interval->AddTempUse(instruction, i);
if (codegen_->NeedsTwoRegisters(DataType::Type::kFloat64)) {
- interval->AddHighInterval(/* is_temp */ true);
+ interval->AddHighInterval(/* is_temp= */ true);
LiveInterval* high = interval->GetHighInterval();
temp_intervals_.push_back(high);
unhandled_fp_intervals_.push_back(high);
@@ -284,7 +284,7 @@
}
if (locations->WillCall()) {
- BlockRegisters(position, position + 1, /* caller_save_only */ true);
+ BlockRegisters(position, position + 1, /* caller_save_only= */ true);
}
for (size_t i = 0; i < locations->GetInputCount(); ++i) {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index be5304c..79eb082 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -68,11 +68,11 @@
bool ValidateIntervals(const ScopedArenaVector<LiveInterval*>& intervals,
const CodeGenerator& codegen) {
return RegisterAllocator::ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
- /* number_of_spill_slots */ 0u,
- /* number_of_out_slots */ 0u,
+ /* number_of_spill_slots= */ 0u,
+ /* number_of_out_slots= */ 0u,
codegen,
- /* processing_core_registers */ true,
- /* log_fatal_on_failure */ false);
+ /* processing_core_registers= */ true,
+ /* log_fatal_on_failure= */ false);
}
};
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index df897a4..fdef45e 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -680,7 +680,7 @@
DCHECK_NE(cursor, cursor->GetBlock()->GetLastInstruction());
DCHECK(!instruction->IsControlFlow());
DCHECK(!cursor->IsControlFlow());
- instruction->MoveBefore(cursor->GetNext(), /* do_checks */ false);
+ instruction->MoveBefore(cursor->GetNext(), /* do_checks= */ false);
}
void HScheduler::Schedule(HInstruction* instruction) {
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index d89d117..858a555 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -563,7 +563,7 @@
last_visited_internal_latency_ = kArmIntegerOpLatency;
last_visited_latency_ = kArmIntegerOpLatency;
} else {
- HandleGenerateDataProcInstruction(/* internal_latency */ true);
+ HandleGenerateDataProcInstruction(/* internal_latency= */ true);
HandleGenerateDataProcInstruction();
}
}
@@ -585,8 +585,8 @@
DCHECK_LT(shift_value, 32U);
if (kind == HInstruction::kOr || kind == HInstruction::kXor) {
- HandleGenerateDataProcInstruction(/* internal_latency */ true);
- HandleGenerateDataProcInstruction(/* internal_latency */ true);
+ HandleGenerateDataProcInstruction(/* internal_latency= */ true);
+ HandleGenerateDataProcInstruction(/* internal_latency= */ true);
HandleGenerateDataProcInstruction();
} else {
last_visited_internal_latency_ += 2 * kArmIntegerOpLatency;
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 981fcc4..e0e265a 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -148,7 +148,7 @@
SchedulingGraph scheduling_graph(scheduler,
GetScopedAllocator(),
- /* heap_location_collector */ nullptr);
+ /* heap_location_collector= */ nullptr);
// Instructions must be inserted in reverse order into the scheduling graph.
for (HInstruction* instr : ReverseRange(block_instructions)) {
scheduling_graph.AddNode(instr);
diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc
index 4b0be07..cf26e79 100644
--- a/compiler/optimizing/side_effects_test.cc
+++ b/compiler/optimizing/side_effects_test.cc
@@ -141,13 +141,13 @@
TEST(SideEffectsTest, VolatileDependences) {
SideEffects volatile_write =
- SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ true);
+ SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ true);
SideEffects any_write =
- SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ false);
+ SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ false);
SideEffects volatile_read =
- SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile */ true);
+ SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile= */ true);
SideEffects any_read =
- SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile */ false);
+ SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile= */ false);
EXPECT_FALSE(volatile_write.MayDependOn(any_read));
EXPECT_TRUE(any_read.MayDependOn(volatile_write));
@@ -163,15 +163,15 @@
TEST(SideEffectsTest, SameWidthTypesNoAlias) {
// Type I/F.
testNoWriteAndReadDependence(
- SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ false),
- SideEffects::FieldReadOfType(DataType::Type::kFloat32, /* is_volatile */ false));
+ SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ false),
+ SideEffects::FieldReadOfType(DataType::Type::kFloat32, /* is_volatile= */ false));
testNoWriteAndReadDependence(
SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
SideEffects::ArrayReadOfType(DataType::Type::kFloat32));
// Type L/D.
testNoWriteAndReadDependence(
- SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile */ false),
- SideEffects::FieldReadOfType(DataType::Type::kFloat64, /* is_volatile */ false));
+ SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile= */ false),
+ SideEffects::FieldReadOfType(DataType::Type::kFloat64, /* is_volatile= */ false));
testNoWriteAndReadDependence(
SideEffects::ArrayWriteOfType(DataType::Type::kInt64),
SideEffects::ArrayReadOfType(DataType::Type::kFloat64));
@@ -181,9 +181,9 @@
SideEffects s = SideEffects::None();
// Keep taking the union of different writes and reads.
for (DataType::Type type : kTestTypes) {
- s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile */ false));
+ s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile= */ false));
s = s.Union(SideEffects::ArrayWriteOfType(type));
- s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile */ false));
+ s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile= */ false));
s = s.Union(SideEffects::ArrayReadOfType(type));
}
EXPECT_TRUE(s.DoesAllReadWrite());
@@ -254,10 +254,10 @@
"||I|||||",
SideEffects::ArrayReadOfType(DataType::Type::kInt32).ToString().c_str());
SideEffects s = SideEffects::None();
- s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kUint16, /* is_volatile */ false));
- s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile */ false));
+ s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kUint16, /* is_volatile= */ false));
+ s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile= */ false));
s = s.Union(SideEffects::ArrayWriteOfType(DataType::Type::kInt16));
- s = s.Union(SideEffects::FieldReadOfType(DataType::Type::kInt32, /* is_volatile */ false));
+ s = s.Union(SideEffects::FieldReadOfType(DataType::Type::kInt32, /* is_volatile= */ false));
s = s.Union(SideEffects::ArrayReadOfType(DataType::Type::kFloat32));
s = s.Union(SideEffects::ArrayReadOfType(DataType::Type::kFloat64));
EXPECT_STREQ("||DF|I||S|JC|", s.ToString().c_str());
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index cef234a..0d0e1ec 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -391,7 +391,7 @@
// succeed in code validated by the verifier.
HInstruction* equivalent = GetFloatOrDoubleEquivalent(value, array_type);
DCHECK(equivalent != nullptr);
- aset->ReplaceInput(equivalent, /* input_index */ 2);
+ aset->ReplaceInput(equivalent, /* index= */ 2);
if (equivalent->IsPhi()) {
// Returned equivalent is a phi which may not have had its inputs
// replaced yet. We need to run primitive type propagation on it.
@@ -525,7 +525,7 @@
class_loader_,
dex_cache_,
handles_,
- /* is_first_run */ true).Run();
+ /* is_first_run= */ true).Run();
// HInstructionBuilder duplicated ArrayGet instructions with ambiguous type
// (int/float or long/double) and marked ArraySets with ambiguous input type.
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 62a70d6..7b2c3a9 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -120,7 +120,7 @@
DCHECK(input->HasSsaIndex());
// `input` generates a result used by `current`. Add use and update
// the live-in set.
- input->GetLiveInterval()->AddUse(current, /* environment */ nullptr, i, actual_user);
+ input->GetLiveInterval()->AddUse(current, /* environment= */ nullptr, i, actual_user);
live_in->SetBit(input->GetSsaIndex());
} else if (has_out_location) {
// `input` generates a result but it is not used by `current`.
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 4b52553..352c44f 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -94,25 +94,25 @@
HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
block->AddInstruction(null_check);
HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
+ /* number_of_vregs= */ 5,
+ /* method= */ nullptr,
+ /* dex_pc= */ 0u,
null_check);
null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
null_check->SetRawEnvironment(null_check_env);
HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
block->AddInstruction(length);
- HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc */ 0u);
+ HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc= */ 0u);
block->AddInstruction(bounds_check);
HEnvironment* bounds_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
+ /* number_of_vregs= */ 5,
+ /* method= */ nullptr,
+ /* dex_pc= */ 0u,
bounds_check);
bounds_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
bounds_check->SetRawEnvironment(bounds_check_env);
HInstruction* array_set =
- new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+ new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc= */ 0);
block->AddInstruction(array_set);
graph_->BuildDominatorTree();
@@ -163,9 +163,9 @@
HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
block->AddInstruction(null_check);
HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
+ /* number_of_vregs= */ 5,
+ /* method= */ nullptr,
+ /* dex_pc= */ 0u,
null_check);
null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
null_check->SetRawEnvironment(null_check_env);
@@ -175,17 +175,17 @@
HInstruction* ae = new (GetAllocator()) HAboveOrEqual(index, length);
block->AddInstruction(ae);
HInstruction* deoptimize = new(GetAllocator()) HDeoptimize(
- GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u);
+ GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc= */ 0u);
block->AddInstruction(deoptimize);
HEnvironment* deoptimize_env = new (GetAllocator()) HEnvironment(GetAllocator(),
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
+ /* number_of_vregs= */ 5,
+ /* method= */ nullptr,
+ /* dex_pc= */ 0u,
deoptimize);
deoptimize_env->CopyFrom(ArrayRef<HInstruction* const>(args));
deoptimize->SetRawEnvironment(deoptimize_env);
HInstruction* array_set =
- new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+ new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc= */ 0);
block->AddInstruction(array_set);
graph_->BuildDominatorTree();
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 5370f43..3fcb72e 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -141,7 +141,7 @@
ArenaBitVector visited_phis_in_cycle(&allocator,
graph_->GetCurrentInstructionId(),
- /* expandable */ false,
+ /* expandable= */ false,
kArenaAllocSsaPhiElimination);
visited_phis_in_cycle.ClearAllBits();
ScopedArenaVector<HPhi*> cycle_worklist(allocator.Adapter(kArenaAllocSsaPhiElimination));
diff --git a/compiler/optimizing/superblock_cloner.h b/compiler/optimizing/superblock_cloner.h
index f211721..dbe9008 100644
--- a/compiler/optimizing/superblock_cloner.h
+++ b/compiler/optimizing/superblock_cloner.h
@@ -372,8 +372,8 @@
// Returns whether the loop can be peeled/unrolled.
bool IsLoopClonable() const { return cloner_.IsSubgraphClonable(); }
- HBasicBlock* DoPeeling() { return DoPeelUnrollImpl(/* to_unroll */ false); }
- HBasicBlock* DoUnrolling() { return DoPeelUnrollImpl(/* to_unroll */ true); }
+ HBasicBlock* DoPeeling() { return DoPeelUnrollImpl(/* to_unroll= */ false); }
+ HBasicBlock* DoUnrolling() { return DoPeelUnrollImpl(/* to_unroll= */ true); }
HLoopInformation* GetRegionToBeAdjusted() const { return cloner_.GetRegionToBeAdjusted(); }
protected:
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index ebb631e..77f5d70 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -91,7 +91,7 @@
___ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value()));
// Check that mr == self.tls32_.is.gc_marking.
___ Cmp(mr, temp);
- ___ B(eq, &mr_is_ok, /* far_target */ false);
+ ___ B(eq, &mr_is_ok, /* is_far_target= */ false);
___ Bkpt(code);
___ Bind(&mr_is_ok);
}
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 096410d..0537225 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -295,7 +295,7 @@
void ImplicitlyAdvancePC() final;
explicit DebugFrameOpCodeWriterForAssembler(Assembler* buffer)
- : dwarf::DebugFrameOpCodeWriter<>(false /* enabled */),
+ : dwarf::DebugFrameOpCodeWriter<>(/* enabled= */ false),
assembler_(buffer),
delay_emitting_advance_pc_(false),
delayed_advance_pcs_() {
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 3d26296..c9ece1d 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -239,7 +239,7 @@
__ Load(scratch_register, FrameOffset(4092), 4);
__ Load(scratch_register, FrameOffset(4096), 4);
__ LoadRawPtrFromThread(scratch_register, ThreadOffset32(512));
- __ LoadRef(method_register, scratch_register, MemberOffset(128), /* unpoison_reference */ false);
+ __ LoadRef(method_register, scratch_register, MemberOffset(128), /* unpoison_reference= */ false);
// Stores
__ Store(FrameOffset(32), method_register, 4);
@@ -284,7 +284,7 @@
__ DecreaseFrameSize(4096);
__ DecreaseFrameSize(32);
- __ RemoveFrame(frame_size, callee_save_regs, /* may_suspend */ true);
+ __ RemoveFrame(frame_size, callee_save_regs, /* may_suspend= */ true);
EmitAndCheck(&assembler, "VixlJniHelpers");
}
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index a673e32..a9d1a25 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -463,7 +463,7 @@
}
void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) {
- Addiu(rt, rs, imm16, /* patcher_label */ nullptr);
+ Addiu(rt, rs, imm16, /* patcher_label= */ nullptr);
}
void MipsAssembler::Subu(Register rd, Register rs, Register rt) {
@@ -732,7 +732,7 @@
}
void MipsAssembler::Lw(Register rt, Register rs, uint16_t imm16) {
- Lw(rt, rs, imm16, /* patcher_label */ nullptr);
+ Lw(rt, rs, imm16, /* patcher_label= */ nullptr);
}
void MipsAssembler::Lwl(Register rt, Register rs, uint16_t imm16) {
@@ -814,7 +814,7 @@
}
void MipsAssembler::Sw(Register rt, Register rs, uint16_t imm16) {
- Sw(rt, rs, imm16, /* patcher_label */ nullptr);
+ Sw(rt, rs, imm16, /* patcher_label= */ nullptr);
}
void MipsAssembler::Swl(Register rt, Register rs, uint16_t imm16) {
@@ -3755,7 +3755,7 @@
void MipsAssembler::Buncond(MipsLabel* label, bool is_r6, bool is_bare) {
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
- branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call */ false, is_bare);
+ branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call= */ false, is_bare);
MoveInstructionToDelaySlot(branches_.back());
FinalizeLabeledBranch(label);
}
@@ -3778,7 +3778,7 @@
void MipsAssembler::Call(MipsLabel* label, bool is_r6, bool is_bare) {
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
- branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call */ true, is_bare);
+ branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call= */ true, is_bare);
MoveInstructionToDelaySlot(branches_.back());
FinalizeLabeledBranch(label);
}
@@ -4300,43 +4300,43 @@
}
void MipsAssembler::B(MipsLabel* label, bool is_bare) {
- Buncond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare);
+ Buncond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare);
}
void MipsAssembler::Bal(MipsLabel* label, bool is_bare) {
- Call(label, /* is_r6 */ (IsR6() && !is_bare), is_bare);
+ Call(label, /* is_r6= */ (IsR6() && !is_bare), is_bare);
}
void MipsAssembler::Beq(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondEQ, rs, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondEQ, rs, rt);
}
void MipsAssembler::Bne(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondNE, rs, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondNE, rs, rt);
}
void MipsAssembler::Beqz(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondEQZ, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondEQZ, rt);
}
void MipsAssembler::Bnez(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondNEZ, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondNEZ, rt);
}
void MipsAssembler::Bltz(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondLTZ, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondLTZ, rt);
}
void MipsAssembler::Bgez(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondGEZ, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondGEZ, rt);
}
void MipsAssembler::Blez(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondLEZ, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondLEZ, rt);
}
void MipsAssembler::Bgtz(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondGTZ, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondGTZ, rt);
}
bool MipsAssembler::CanExchangeWithSlt(Register rs, Register rt) const {
@@ -4392,7 +4392,7 @@
Bcond(label, IsR6(), is_bare, kCondLT, rs, rt);
} else if (!Branch::IsNop(kCondLT, rs, rt)) {
// Synthesize the instruction (not available on R2).
- GenerateSltForCondBranch(/* unsigned_slt */ false, rs, rt);
+ GenerateSltForCondBranch(/* unsigned_slt= */ false, rs, rt);
Bnez(AT, label, is_bare);
}
}
@@ -4404,7 +4404,7 @@
B(label, is_bare);
} else {
// Synthesize the instruction (not available on R2).
- GenerateSltForCondBranch(/* unsigned_slt */ false, rs, rt);
+ GenerateSltForCondBranch(/* unsigned_slt= */ false, rs, rt);
Beqz(AT, label, is_bare);
}
}
@@ -4414,7 +4414,7 @@
Bcond(label, IsR6(), is_bare, kCondLTU, rs, rt);
} else if (!Branch::IsNop(kCondLTU, rs, rt)) {
// Synthesize the instruction (not available on R2).
- GenerateSltForCondBranch(/* unsigned_slt */ true, rs, rt);
+ GenerateSltForCondBranch(/* unsigned_slt= */ true, rs, rt);
Bnez(AT, label, is_bare);
}
}
@@ -4426,7 +4426,7 @@
B(label, is_bare);
} else {
// Synthesize the instruction (not available on R2).
- GenerateSltForCondBranch(/* unsigned_slt */ true, rs, rt);
+ GenerateSltForCondBranch(/* unsigned_slt= */ true, rs, rt);
Beqz(AT, label, is_bare);
}
}
@@ -4437,7 +4437,7 @@
void MipsAssembler::Bc1f(int cc, MipsLabel* label, bool is_bare) {
CHECK(IsUint<3>(cc)) << cc;
- Bcond(label, /* is_r6 */ false, is_bare, kCondF, static_cast<Register>(cc), ZERO);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondF, static_cast<Register>(cc), ZERO);
}
void MipsAssembler::Bc1t(MipsLabel* label, bool is_bare) {
@@ -4446,71 +4446,71 @@
void MipsAssembler::Bc1t(int cc, MipsLabel* label, bool is_bare) {
CHECK(IsUint<3>(cc)) << cc;
- Bcond(label, /* is_r6 */ false, is_bare, kCondT, static_cast<Register>(cc), ZERO);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondT, static_cast<Register>(cc), ZERO);
}
void MipsAssembler::Bc(MipsLabel* label, bool is_bare) {
- Buncond(label, /* is_r6 */ true, is_bare);
+ Buncond(label, /* is_r6= */ true, is_bare);
}
void MipsAssembler::Balc(MipsLabel* label, bool is_bare) {
- Call(label, /* is_r6 */ true, is_bare);
+ Call(label, /* is_r6= */ true, is_bare);
}
void MipsAssembler::Beqc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondEQ, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondEQ, rs, rt);
}
void MipsAssembler::Bnec(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondNE, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondNE, rs, rt);
}
void MipsAssembler::Beqzc(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondEQZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondEQZ, rt);
}
void MipsAssembler::Bnezc(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondNEZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondNEZ, rt);
}
void MipsAssembler::Bltzc(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLTZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLTZ, rt);
}
void MipsAssembler::Bgezc(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGEZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGEZ, rt);
}
void MipsAssembler::Blezc(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLEZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLEZ, rt);
}
void MipsAssembler::Bgtzc(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGTZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGTZ, rt);
}
void MipsAssembler::Bltc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLT, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLT, rs, rt);
}
void MipsAssembler::Bgec(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGE, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGE, rs, rt);
}
void MipsAssembler::Bltuc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLTU, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLTU, rs, rt);
}
void MipsAssembler::Bgeuc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGEU, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGEU, rs, rt);
}
void MipsAssembler::Bc1eqz(FRegister ft, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondF, static_cast<Register>(ft), ZERO);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondF, static_cast<Register>(ft), ZERO);
}
void MipsAssembler::Bc1nez(FRegister ft, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondT, static_cast<Register>(ft), ZERO);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondT, static_cast<Register>(ft), ZERO);
}
void MipsAssembler::AdjustBaseAndOffset(Register& base,
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 8a1e1df..69189a4 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -862,7 +862,7 @@
// We permit `base` and `temp` to coincide (however, we check that neither is AT),
// in which case the `base` register may be overwritten in the process.
CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base.
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
uint32_t low = Low32Bits(value);
uint32_t high = High32Bits(value);
Register reg;
@@ -917,7 +917,7 @@
Register base,
int32_t offset,
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
switch (type) {
case kLoadSignedByte:
Lb(reg, base, offset);
@@ -960,7 +960,7 @@
Register base,
int32_t offset,
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ false, /* is_float= */ true);
Lwc1(reg, base, offset);
null_checker();
}
@@ -970,7 +970,7 @@
Register base,
int32_t offset,
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ true, /* is_float= */ true);
if (IsAligned<kMipsDoublewordSize>(offset)) {
Ldc1(reg, base, offset);
null_checker();
@@ -1016,7 +1016,7 @@
// Must not use AT as `reg`, so as not to overwrite the value being stored
// with the adjusted `base`.
CHECK_NE(reg, AT);
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
switch (type) {
case kStoreByte:
Sb(reg, base, offset);
@@ -1047,7 +1047,7 @@
Register base,
int32_t offset,
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ false, /* is_float= */ true);
Swc1(reg, base, offset);
null_checker();
}
@@ -1057,7 +1057,7 @@
Register base,
int32_t offset,
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ true, /* is_float= */ true);
if (IsAligned<kMipsDoublewordSize>(offset)) {
Sdc1(reg, base, offset);
null_checker();
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 723c489..4e27bbf 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -1078,11 +1078,11 @@
//////////////
TEST_F(AssemblerMIPS32r6Test, Bc) {
- BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot */ false);
+ BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot= */ false);
}
TEST_F(AssemblerMIPS32r6Test, Balc) {
- BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot */ false);
+ BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot= */ false);
}
TEST_F(AssemblerMIPS32r6Test, Beqc) {
@@ -1142,11 +1142,11 @@
}
TEST_F(AssemblerMIPS32r6Test, B) {
- BranchHelper(&mips::MipsAssembler::B, "Bc", /* has_slot */ false);
+ BranchHelper(&mips::MipsAssembler::B, "Bc", /* has_slot= */ false);
}
TEST_F(AssemblerMIPS32r6Test, Bal) {
- BranchHelper(&mips::MipsAssembler::Bal, "Balc", /* has_slot */ false);
+ BranchHelper(&mips::MipsAssembler::Bal, "Balc", /* has_slot= */ false);
}
TEST_F(AssemblerMIPS32r6Test, Beq) {
@@ -1198,123 +1198,123 @@
}
TEST_F(AssemblerMIPS32r6Test, BareBc) {
- BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot */ false, /* is_bare */ true);
+ BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot= */ false, /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBalc) {
- BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot */ false, /* is_bare */ true);
+ BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot= */ false, /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBeqc) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Beqc, "Beqc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Beqc, "Beqc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBnec) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bnec, "Bnec", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bnec, "Bnec", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBeqzc) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Beqzc, "Beqzc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Beqzc, "Beqzc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBnezc) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bnezc, "Bnezc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bnezc, "Bnezc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBltzc) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bltzc, "Bltzc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bltzc, "Bltzc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBgezc) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bgezc, "Bgezc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bgezc, "Bgezc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBlezc) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Blezc, "Blezc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Blezc, "Blezc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBgtzc) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bgtzc, "Bgtzc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bgtzc, "Bgtzc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBltc) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltc, "Bltc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltc, "Bltc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBgec) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgec, "Bgec", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgec, "Bgec", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBltuc) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltuc, "Bltuc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltuc, "Bltuc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBgeuc) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeuc, "Bgeuc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeuc, "Bgeuc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBc1eqz) {
- BranchFpuCondHelper(&mips::MipsAssembler::Bc1eqz, "Bc1eqz", /* is_bare */ true);
+ BranchFpuCondHelper(&mips::MipsAssembler::Bc1eqz, "Bc1eqz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBc1nez) {
- BranchFpuCondHelper(&mips::MipsAssembler::Bc1nez, "Bc1nez", /* is_bare */ true);
+ BranchFpuCondHelper(&mips::MipsAssembler::Bc1nez, "Bc1nez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareB) {
- BranchHelper(&mips::MipsAssembler::B, "B", /* has_slot */ true, /* is_bare */ true);
+ BranchHelper(&mips::MipsAssembler::B, "B", /* has_slot= */ true, /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBal) {
- BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* has_slot */ true, /* is_bare */ true);
+ BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* has_slot= */ true, /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBeq) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBne) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBeqz) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBnez) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBltz) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBgez) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBlez) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBgtz) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBlt) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBge) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBltu) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBgeu) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, LongBeqc) {
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index 4f8ccee..c0894d3 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -2241,67 +2241,67 @@
}
TEST_F(AssemblerMIPSTest, BareB) {
- BranchHelper(&mips::MipsAssembler::B, "B", /* is_bare */ true);
+ BranchHelper(&mips::MipsAssembler::B, "B", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBal) {
- BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* is_bare */ true);
+ BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBeq) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBne) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBeqz) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBnez) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBltz) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBgez) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBlez) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBgtz) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBlt) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBge) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBltu) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBgeu) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBc1f) {
- BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1f, "Bc1f", /* is_bare */ true);
+ BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1f, "Bc1f", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBc1t) {
- BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1t, "Bc1t", /* is_bare */ true);
+ BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1t, "Bc1t", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, ImpossibleReordering) {
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 29d2bed..70313ca 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -2455,7 +2455,7 @@
condition_(kUncond) {
InitializeType(
(is_call ? (is_bare ? kBareCall : kCall) : (is_bare ? kBareCondBranch : kCondBranch)),
- /* is_r6 */ true);
+ /* is_r6= */ true);
}
Mips64Assembler::Branch::Branch(bool is_r6,
@@ -2516,7 +2516,7 @@
rhs_reg_(ZERO),
condition_(kUncond) {
CHECK_NE(dest_reg, ZERO);
- InitializeType(label_or_literal_type, /* is_r6 */ true);
+ InitializeType(label_or_literal_type, /* is_r6= */ true);
}
Mips64Assembler::BranchCondition Mips64Assembler::Branch::OppositeCondition(
@@ -2896,7 +2896,7 @@
void Mips64Assembler::Buncond(Mips64Label* label, bool is_bare) {
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
- branches_.emplace_back(buffer_.Size(), target, /* is_call */ false, is_bare);
+ branches_.emplace_back(buffer_.Size(), target, /* is_call= */ false, is_bare);
FinalizeLabeledBranch(label);
}
@@ -2917,7 +2917,7 @@
void Mips64Assembler::Call(Mips64Label* label, bool is_bare) {
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
- branches_.emplace_back(buffer_.Size(), target, /* is_call */ true, is_bare);
+ branches_.emplace_back(buffer_.Size(), target, /* is_call= */ true, is_bare);
FinalizeLabeledBranch(label);
}
@@ -3278,99 +3278,99 @@
}
void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLT, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLT, rs, rt);
}
void Mips64Assembler::Bltzc(GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLTZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLTZ, rt);
}
void Mips64Assembler::Bgtzc(GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGTZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGTZ, rt);
}
void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGE, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGE, rs, rt);
}
void Mips64Assembler::Bgezc(GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGEZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGEZ, rt);
}
void Mips64Assembler::Blezc(GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLEZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLEZ, rt);
}
void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLTU, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLTU, rs, rt);
}
void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGEU, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGEU, rs, rt);
}
void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondEQ, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondEQ, rs, rt);
}
void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondNE, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondNE, rs, rt);
}
void Mips64Assembler::Beqzc(GpuRegister rs, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondEQZ, rs);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondEQZ, rs);
}
void Mips64Assembler::Bnezc(GpuRegister rs, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondNEZ, rs);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondNEZ, rs);
}
void Mips64Assembler::Bc1eqz(FpuRegister ft, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondF, static_cast<GpuRegister>(ft), ZERO);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondF, static_cast<GpuRegister>(ft), ZERO);
}
void Mips64Assembler::Bc1nez(FpuRegister ft, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondT, static_cast<GpuRegister>(ft), ZERO);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondT, static_cast<GpuRegister>(ft), ZERO);
}
void Mips64Assembler::Bltz(GpuRegister rt, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondLTZ, rt);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondLTZ, rt);
}
void Mips64Assembler::Bgtz(GpuRegister rt, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondGTZ, rt);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondGTZ, rt);
}
void Mips64Assembler::Bgez(GpuRegister rt, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondGEZ, rt);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondGEZ, rt);
}
void Mips64Assembler::Blez(GpuRegister rt, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondLEZ, rt);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondLEZ, rt);
}
void Mips64Assembler::Beq(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondEQ, rs, rt);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondEQ, rs, rt);
}
void Mips64Assembler::Bne(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondNE, rs, rt);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondNE, rs, rt);
}
void Mips64Assembler::Beqz(GpuRegister rs, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondEQZ, rs);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondEQZ, rs);
}
void Mips64Assembler::Bnez(GpuRegister rs, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondNEZ, rs);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondNEZ, rs);
}
void Mips64Assembler::AdjustBaseAndOffset(GpuRegister& base,
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index ce447db..2f991e9 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -1058,7 +1058,7 @@
// We permit `base` and `temp` to coincide (however, we check that neither is AT),
// in which case the `base` register may be overwritten in the process.
CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base.
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
GpuRegister reg;
// If the adjustment left `base` unchanged and equal to `temp`, we can't use `temp`
// to load and hold the value but we can use AT instead as AT hasn't been used yet.
@@ -1127,7 +1127,7 @@
GpuRegister base,
int32_t offset,
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
switch (type) {
case kLoadSignedByte:
@@ -1178,7 +1178,7 @@
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
int element_size_shift = -1;
if (type != kLoadQuadword) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
} else {
AdjustBaseOffsetAndElementSizeShift(base, offset, element_size_shift);
}
@@ -1226,7 +1226,7 @@
// Must not use AT as `reg`, so as not to overwrite the value being stored
// with the adjusted `base`.
CHECK_NE(reg, AT);
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
switch (type) {
case kStoreByte:
@@ -1267,7 +1267,7 @@
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
int element_size_shift = -1;
if (type != kStoreQuadword) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
} else {
AdjustBaseOffsetAndElementSizeShift(base, offset, element_size_shift);
}
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 66711c3..499e8f4 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -852,99 +852,99 @@
}
TEST_F(AssemblerMIPS64Test, BareBc) {
- BranchHelper(&mips64::Mips64Assembler::Bc, "Bc", /* is_bare */ true);
+ BranchHelper(&mips64::Mips64Assembler::Bc, "Bc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBalc) {
- BranchHelper(&mips64::Mips64Assembler::Balc, "Balc", /* is_bare */ true);
+ BranchHelper(&mips64::Mips64Assembler::Balc, "Balc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBeqzc) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqzc, "Beqzc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqzc, "Beqzc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBnezc) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnezc, "Bnezc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnezc, "Bnezc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBltzc) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltzc, "Bltzc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltzc, "Bltzc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBgezc) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgezc, "Bgezc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgezc, "Bgezc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBlezc) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Blezc, "Blezc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Blezc, "Blezc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBgtzc) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtzc, "Bgtzc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtzc, "Bgtzc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBeqc) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beqc, "Beqc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beqc, "Beqc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBnec) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bnec, "Bnec", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bnec, "Bnec", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBltc) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltc, "Bltc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltc, "Bltc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBgec) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgec, "Bgec", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgec, "Bgec", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBltuc) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltuc, "Bltuc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltuc, "Bltuc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBgeuc) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgeuc, "Bgeuc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgeuc, "Bgeuc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBc1eqz) {
- BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1eqz, "Bc1eqz", /* is_bare */ true);
+ BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1eqz, "Bc1eqz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBc1nez) {
- BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1nez, "Bc1nez", /* is_bare */ true);
+ BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1nez, "Bc1nez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBeqz) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqz, "Beqz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqz, "Beqz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBnez) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnez, "Bnez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnez, "Bnez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBltz) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltz, "Bltz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltz, "Bltz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBgez) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgez, "Bgez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgez, "Bgez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBlez) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Blez, "Blez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Blez, "Blez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBgtz) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtz, "Bgtz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtz, "Bgtz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBeq) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beq, "Beq", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beq, "Beq", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBne) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bne, "Bne", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bne, "Bne", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, LongBeqc) {
@@ -1252,7 +1252,7 @@
std::vector<mips64::GpuRegister*> reg1_registers = GetRegisters();
std::vector<mips64::GpuRegister*> reg2_registers = GetRegisters();
reg2_registers.erase(reg2_registers.begin()); // reg2 can't be ZERO, remove it.
- std::vector<int64_t> imms = CreateImmediateValuesBits(/* imm_bits */ 16, /* as_uint */ true);
+ std::vector<int64_t> imms = CreateImmediateValuesBits(/* imm_bits= */ 16, /* as_uint= */ true);
WarnOnCombinations(reg1_registers.size() * reg2_registers.size() * imms.size());
std::ostringstream expected;
for (mips64::GpuRegister* reg1 : reg1_registers) {
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 2d1e451..4b073bd 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -2151,7 +2151,7 @@
void X86Assembler::cmpw(const Address& address, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
- EmitComplex(7, address, imm, /* is_16_op */ true);
+ EmitComplex(7, address, imm, /* is_16_op= */ true);
}
@@ -2341,7 +2341,7 @@
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK(imm.is_uint16() || imm.is_int16()) << imm.value();
EmitUint8(0x66);
- EmitComplex(0, address, imm, /* is_16_op */ true);
+ EmitComplex(0, address, imm, /* is_16_op= */ true);
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index ae68fe9..c118bc6 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2391,7 +2391,7 @@
CHECK(imm.is_int32());
EmitOperandSizeOverride();
EmitOptionalRex32(address);
- EmitComplex(7, address, imm, /* is_16_op */ true);
+ EmitComplex(7, address, imm, /* is_16_op= */ true);
}
@@ -2805,7 +2805,7 @@
CHECK(imm.is_uint16() || imm.is_int16()) << imm.value();
EmitUint8(0x66);
EmitOptionalRex32(address);
- EmitComplex(0, address, imm, /* is_16_op */ true);
+ EmitComplex(0, address, imm, /* is_16_op= */ true);
}
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 528e037..461f028 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -2094,7 +2094,7 @@
ArrayRef<const ManagedRegister> spill_regs(raw_spill_regs);
size_t frame_size = 10 * kStackAlignment;
- assembler->RemoveFrame(frame_size, spill_regs, /* may_suspend */ true);
+ assembler->RemoveFrame(frame_size, spill_regs, /* may_suspend= */ true);
// Construct assembly text counterpart.
std::ostringstream str;
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 8c90aa7..092e931 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -83,16 +83,16 @@
compiler_driver_->InitializeThreadPools();
}
- void VerifyWithCompilerDriver(verifier::VerifierDeps* deps) {
+ void VerifyWithCompilerDriver(verifier::VerifierDeps* verifier_deps) {
TimingLogger timings("Verify", false, false);
// The compiler driver handles the verifier deps in the callbacks, so
// remove what this class did for unit testing.
- if (deps == nullptr) {
+ if (verifier_deps == nullptr) {
// Create some verifier deps by default if they are not already specified.
- deps = new verifier::VerifierDeps(dex_files_);
- verifier_deps_.reset(deps);
+ verifier_deps = new verifier::VerifierDeps(dex_files_);
+ verifier_deps_.reset(verifier_deps);
}
- callbacks_->SetVerifierDeps(deps);
+ callbacks_->SetVerifierDeps(verifier_deps);
compiler_driver_->Verify(class_loader_, dex_files_, &timings, verification_results_.get());
callbacks_->SetVerifierDeps(nullptr);
// Clear entries in the verification results to avoid hitting a DCHECK that
@@ -147,7 +147,7 @@
hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader_)));
Handle<mirror::DexCache> dex_cache_handle(hs.NewHandle(klass_Main_->GetDexCache()));
- const DexFile::ClassDef* class_def = klass_Main_->GetClassDef();
+ const dex::ClassDef* class_def = klass_Main_->GetClassDef();
ClassAccessor accessor(*primary_dex_file_, *class_def);
bool has_failures = true;
@@ -159,7 +159,7 @@
method.GetIndex(),
dex_cache_handle,
class_loader_handle,
- /* referrer */ nullptr,
+ /* referrer= */ nullptr,
method.GetInvokeType(class_def->access_flags_));
CHECK(resolved_method != nullptr);
if (method_name == resolved_method->GetName()) {
@@ -173,12 +173,12 @@
method.GetIndex(),
resolved_method,
method.GetAccessFlags(),
- true /* can_load_classes */,
- true /* allow_soft_failures */,
- true /* need_precise_constants */,
- false /* verify to dump */,
- true /* allow_thread_suspension */,
- 0 /* api_level */);
+ /* can_load_classes= */ true,
+ /* allow_soft_failures= */ true,
+ /* need_precise_constants= */ true,
+ /* verify to dump */ false,
+ /* allow_thread_suspension= */ true,
+ /* api_level= */ 0);
verifier.Verify();
soa.Self()->SetVerifierDeps(nullptr);
has_failures = verifier.HasFailures();
@@ -195,7 +195,7 @@
LoadDexFile(soa, "VerifierDeps", multidex);
}
SetupCompilerDriver();
- VerifyWithCompilerDriver(/* verifier_deps */ nullptr);
+ VerifyWithCompilerDriver(/* verifier_deps= */ nullptr);
}
bool TestAssignabilityRecording(const std::string& dst,
@@ -228,7 +228,7 @@
for (const DexFile* dex_file : dex_files_) {
const std::set<dex::TypeIndex>& unverified_classes = deps.GetUnverifiedClasses(*dex_file);
for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ const dex::ClassDef& class_def = dex_file->GetClassDef(i);
const char* descriptor = dex_file->GetClassDescriptor(class_def);
cls.Assign(class_linker_->FindClass(soa.Self(), descriptor, class_loader_handle));
if (cls == nullptr) {
@@ -250,7 +250,7 @@
}
bool HasUnverifiedClass(const std::string& cls, const DexFile& dex_file) {
- const DexFile::TypeId* type_id = dex_file.FindTypeId(cls.c_str());
+ const dex::TypeId* type_id = dex_file.FindTypeId(cls.c_str());
DCHECK(type_id != nullptr);
dex::TypeIndex index = dex_file.GetIndexForTypeId(*type_id);
for (const auto& dex_dep : verifier_deps_->dex_deps_) {
@@ -329,7 +329,7 @@
continue;
}
- const DexFile::FieldId& field_id = dex_dep.first->GetFieldId(entry.GetDexFieldIndex());
+ const dex::FieldId& field_id = dex_dep.first->GetFieldId(entry.GetDexFieldIndex());
std::string actual_klass = dex_dep.first->StringByTypeIdx(field_id.class_idx_);
if (expected_klass != actual_klass) {
@@ -372,16 +372,16 @@
bool HasMethod(const std::string& expected_klass,
const std::string& expected_name,
const std::string& expected_signature,
- bool expected_resolved,
+ bool expect_resolved,
const std::string& expected_access_flags = "",
const std::string& expected_decl_klass = "") {
for (auto& dex_dep : verifier_deps_->dex_deps_) {
for (const VerifierDeps::MethodResolution& entry : dex_dep.second->methods_) {
- if (expected_resolved != entry.IsResolved()) {
+ if (expect_resolved != entry.IsResolved()) {
continue;
}
- const DexFile::MethodId& method_id = dex_dep.first->GetMethodId(entry.GetDexMethodIndex());
+ const dex::MethodId& method_id = dex_dep.first->GetMethodId(entry.GetDexMethodIndex());
std::string actual_klass = dex_dep.first->StringByTypeIdx(method_id.class_idx_);
if (expected_klass != actual_klass) {
@@ -398,7 +398,7 @@
continue;
}
- if (expected_resolved) {
+ if (expect_resolved) {
// Test access flags. Note that PrettyJavaAccessFlags always appends
// a space after the modifiers. Add it to the expected access flags.
std::string actual_access_flags = PrettyJavaAccessFlags(entry.GetAccessFlags());
@@ -482,42 +482,42 @@
}
TEST_F(VerifierDepsTest, Assignable_BothInBoot) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/TimeZone;",
- /* src */ "Ljava/util/SimpleTimeZone;",
- /* is_strict */ true,
- /* is_assignable */ true));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/util/TimeZone;",
+ /* src= */ "Ljava/util/SimpleTimeZone;",
+ /* is_strict= */ true,
+ /* is_assignable= */ true));
ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
}
TEST_F(VerifierDepsTest, Assignable_DestinationInBoot1) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/net/Socket;",
- /* src */ "LMySSLSocket;",
- /* is_strict */ true,
- /* is_assignable */ true));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/net/Socket;",
+ /* src= */ "LMySSLSocket;",
+ /* is_strict= */ true,
+ /* is_assignable= */ true));
ASSERT_TRUE(HasAssignable("Ljava/net/Socket;", "Ljavax/net/ssl/SSLSocket;", true));
}
TEST_F(VerifierDepsTest, Assignable_DestinationInBoot2) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/TimeZone;",
- /* src */ "LMySimpleTimeZone;",
- /* is_strict */ true,
- /* is_assignable */ true));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/util/TimeZone;",
+ /* src= */ "LMySimpleTimeZone;",
+ /* is_strict= */ true,
+ /* is_assignable= */ true));
ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
}
TEST_F(VerifierDepsTest, Assignable_DestinationInBoot3) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/Collection;",
- /* src */ "LMyThreadSet;",
- /* is_strict */ true,
- /* is_assignable */ true));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/util/Collection;",
+ /* src= */ "LMyThreadSet;",
+ /* is_strict= */ true,
+ /* is_assignable= */ true));
ASSERT_TRUE(HasAssignable("Ljava/util/Collection;", "Ljava/util/Set;", true));
}
TEST_F(VerifierDepsTest, Assignable_BothArrays_Resolved) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[[Ljava/util/TimeZone;",
- /* src */ "[[Ljava/util/SimpleTimeZone;",
- /* is_strict */ true,
- /* is_assignable */ true));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "[[Ljava/util/TimeZone;",
+ /* src= */ "[[Ljava/util/SimpleTimeZone;",
+ /* is_strict= */ true,
+ /* is_assignable= */ true));
// If the component types of both arrays are resolved, we optimize the list of
// dependencies by recording a dependency on the component types.
ASSERT_FALSE(HasAssignable("[[Ljava/util/TimeZone;", "[[Ljava/util/SimpleTimeZone;", true));
@@ -526,34 +526,34 @@
}
TEST_F(VerifierDepsTest, NotAssignable_BothInBoot) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
- /* src */ "Ljava/util/SimpleTimeZone;",
- /* is_strict */ true,
- /* is_assignable */ false));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+ /* src= */ "Ljava/util/SimpleTimeZone;",
+ /* is_strict= */ true,
+ /* is_assignable= */ false));
ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false));
}
TEST_F(VerifierDepsTest, NotAssignable_DestinationInBoot1) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
- /* src */ "LMySSLSocket;",
- /* is_strict */ true,
- /* is_assignable */ false));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+ /* src= */ "LMySSLSocket;",
+ /* is_strict= */ true,
+ /* is_assignable= */ false));
ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljavax/net/ssl/SSLSocket;", false));
}
TEST_F(VerifierDepsTest, NotAssignable_DestinationInBoot2) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
- /* src */ "LMySimpleTimeZone;",
- /* is_strict */ true,
- /* is_assignable */ false));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+ /* src= */ "LMySimpleTimeZone;",
+ /* is_strict= */ true,
+ /* is_assignable= */ false));
ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false));
}
TEST_F(VerifierDepsTest, NotAssignable_BothArrays) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[Ljava/lang/Exception;",
- /* src */ "[Ljava/util/SimpleTimeZone;",
- /* is_strict */ true,
- /* is_assignable */ false));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "[Ljava/lang/Exception;",
+ /* src= */ "[Ljava/util/SimpleTimeZone;",
+ /* is_strict= */ true,
+ /* is_assignable= */ false));
ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false));
}
@@ -589,7 +589,7 @@
ASSERT_TRUE(HasMethod("Ljava/text/SimpleDateFormat;",
"setTimeZone",
"(Ljava/util/TimeZone;)V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/text/DateFormat;"));
ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
@@ -824,7 +824,7 @@
ASSERT_TRUE(HasMethod("Ljava/net/Socket;",
"setSocketImplFactory",
"(Ljava/net/SocketImplFactory;)V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public static",
"Ljava/net/Socket;"));
}
@@ -835,7 +835,7 @@
ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
"setSocketImplFactory",
"(Ljava/net/SocketImplFactory;)V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public static",
"Ljava/net/Socket;"));
}
@@ -845,7 +845,7 @@
ASSERT_TRUE(HasMethod("LMySSLSocket;",
"setSocketImplFactory",
"(Ljava/net/SocketImplFactory;)V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public static",
"Ljava/net/Socket;"));
}
@@ -856,7 +856,7 @@
ASSERT_TRUE(HasMethod("Ljava/util/Map$Entry;",
"comparingByKey",
"()Ljava/util/Comparator;",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public static",
"Ljava/util/Map$Entry;"));
}
@@ -867,7 +867,7 @@
ASSERT_TRUE(HasMethod("Ljava/util/AbstractMap$SimpleEntry;",
"comparingByKey",
"()Ljava/util/Comparator;",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeStatic_Unresolved1) {
@@ -876,7 +876,7 @@
ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
"x",
"()V",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeStatic_Unresolved2) {
@@ -884,7 +884,7 @@
ASSERT_TRUE(HasMethod("LMySSLSocket;",
"x",
"()V",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInReferenced) {
@@ -893,7 +893,7 @@
ASSERT_TRUE(HasMethod("Ljava/net/Socket;",
"<init>",
"()V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/net/Socket;"));
}
@@ -904,7 +904,7 @@
ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
"checkOldImpl",
"()V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"private",
"Ljava/net/Socket;"));
}
@@ -914,7 +914,7 @@
ASSERT_TRUE(HasMethod("LMySSLSocket;",
"checkOldImpl",
"()V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"private",
"Ljava/net/Socket;"));
}
@@ -925,7 +925,7 @@
ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
"x",
"()V",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeDirect_Unresolved2) {
@@ -933,7 +933,7 @@
ASSERT_TRUE(HasMethod("LMySSLSocket;",
"x",
"()V",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInReferenced) {
@@ -942,7 +942,7 @@
ASSERT_TRUE(HasMethod("Ljava/lang/Throwable;",
"getMessage",
"()Ljava/lang/String;",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/lang/Throwable;"));
// Type dependency on `this` argument.
@@ -955,7 +955,7 @@
ASSERT_TRUE(HasMethod("Ljava/io/InterruptedIOException;",
"getMessage",
"()Ljava/lang/String;",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/lang/Throwable;"));
// Type dependency on `this` argument.
@@ -967,7 +967,7 @@
ASSERT_TRUE(HasMethod("LMySocketTimeoutException;",
"getMessage",
"()Ljava/lang/String;",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/lang/Throwable;"));
}
@@ -977,7 +977,7 @@
ASSERT_TRUE(HasMethod("LMyThreadSet;",
"size",
"()I",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/util/Set;"));
}
@@ -988,7 +988,7 @@
ASSERT_TRUE(HasMethod("Ljava/io/InterruptedIOException;",
"x",
"()V",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeVirtual_Unresolved2) {
@@ -996,7 +996,7 @@
ASSERT_TRUE(HasMethod("LMySocketTimeoutException;",
"x",
"()V",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInReferenced) {
@@ -1005,7 +1005,7 @@
ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
"run",
"()V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/lang/Runnable;"));
}
@@ -1016,7 +1016,7 @@
ASSERT_TRUE(HasMethod("LMyThread;",
"join",
"()V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/lang/Thread;"));
}
@@ -1027,7 +1027,7 @@
ASSERT_TRUE(HasMethod("LMyThreadSet;",
"run",
"()V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/lang/Thread;"));
}
@@ -1037,7 +1037,7 @@
ASSERT_TRUE(HasMethod("LMyThreadSet;",
"isEmpty",
"()Z",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/util/Set;"));
}
@@ -1048,12 +1048,12 @@
ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
"x",
"()V",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeInterface_Unresolved2) {
ASSERT_FALSE(VerifyMethod("InvokeInterface_Unresolved2"));
- ASSERT_TRUE(HasMethod("LMyThreadSet;", "x", "()V", /* expect_resolved */ false));
+ ASSERT_TRUE(HasMethod("LMyThreadSet;", "x", "()V", /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeSuper_ThisAssignable) {
@@ -1063,7 +1063,7 @@
ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
"run",
"()V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/lang/Runnable;"));
}
@@ -1074,7 +1074,7 @@
ASSERT_TRUE(HasAssignable("Ljava/lang/Integer;", "Ljava/lang/Thread;", false));
ASSERT_TRUE(HasMethod("Ljava/lang/Integer;",
"intValue", "()I",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public", "Ljava/lang/Integer;"));
}
@@ -1443,7 +1443,7 @@
ScopedObjectAccess soa(Thread::Current());
LoadDexFile(soa, "VerifierDeps", multi);
}
- VerifyWithCompilerDriver(/* verifier_deps */ nullptr);
+ VerifyWithCompilerDriver(/* verifier_deps= */ nullptr);
std::vector<uint8_t> buffer;
verifier_deps_->Encode(dex_files_, &buffer);
@@ -1493,22 +1493,22 @@
}
TEST_F(VerifierDepsTest, NotAssignable_InterfaceWithClassInBoot) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
- /* src */ "LIface;",
- /* is_strict */ true,
- /* is_assignable */ false));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+ /* src= */ "LIface;",
+ /* is_strict= */ true,
+ /* is_assignable= */ false));
ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "LIface;", false));
}
TEST_F(VerifierDepsTest, Assignable_Arrays) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[LIface;",
- /* src */ "[LMyClassExtendingInterface;",
- /* is_strict */ false,
- /* is_assignable */ true));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "[LIface;",
+ /* src= */ "[LMyClassExtendingInterface;",
+ /* is_strict= */ false,
+ /* is_assignable= */ true));
ASSERT_FALSE(HasAssignable(
- "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable */ true));
+ "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable= */ true));
ASSERT_FALSE(HasAssignable(
- "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable */ false));
+ "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable= */ false));
}
} // namespace verifier
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index f8bdb16..3a2ae75 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -114,7 +114,7 @@
output_extension: "operator_out.cc",
}
-art_cc_static_library {
+art_cc_library_static {
name: "libart-dex2oat",
defaults: ["libart-dex2oat-defaults"],
shared_libs: [
@@ -139,7 +139,7 @@
],
}
-art_cc_static_library {
+art_cc_library_static {
name: "libartd-dex2oat",
defaults: [
"art_debug_defaults",
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0b2c0b6..ea4158a 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -479,14 +479,13 @@
UsageError(" --compilation-reason=<string>: optional metadata specifying the reason for");
UsageError(" compiling the apk. If specified, the string will be embedded verbatim in");
UsageError(" the key value store of the oat file.");
+ UsageError(" Example: --compilation-reason=install");
UsageError("");
UsageError(" --resolve-startup-const-strings=true|false: If true, the compiler eagerly");
UsageError(" resolves strings referenced from const-string of startup methods.");
UsageError("");
UsageError(" --max-image-block-size=<size>: Maximum solid block size for compressed images.");
UsageError("");
- UsageError(" Example: --compilation-reason=install");
- UsageError("");
std::cerr << "See log for usage error information\n";
exit(EXIT_FAILURE);
}
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 92dd932..fd454f0 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -1331,7 +1331,7 @@
// first.
std::vector<uint16_t> methods;
{
- const DexFile::TypeId* type_id = dex->FindTypeId("LManyMethods;");
+ const dex::TypeId* type_id = dex->FindTypeId("LManyMethods;");
dex::TypeIndex type_idx = dex->GetIndexForTypeId(*type_id);
ClassAccessor accessor(*dex, *dex->FindClassDef(type_idx));
std::set<size_t> code_item_offsets;
@@ -1431,10 +1431,10 @@
// we expect.
std::unique_ptr<const DexFile> dex_file(oat_dex->OpenDexFile(&error_msg));
ASSERT_TRUE(dex_file != nullptr) << error_msg;
- const DexFile::TypeId* type_id = dex_file->FindTypeId("LManyMethods;");
+ const dex::TypeId* type_id = dex_file->FindTypeId("LManyMethods;");
ASSERT_TRUE(type_id != nullptr);
dex::TypeIndex type_idx = dex_file->GetIndexForTypeId(*type_id);
- const DexFile::ClassDef* class_def = dex_file->FindClassDef(type_idx);
+ const dex::ClassDef* class_def = dex_file->FindClassDef(type_idx);
ASSERT_TRUE(class_def != nullptr);
// Count how many code items are for each category, there should be at least one per category.
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index fa0a3d4..8c9dfb8 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -36,6 +36,7 @@
#include "compiler_callbacks.h"
#include "debug/method_debug_info.h"
#include "dex/quick_compiler_callbacks.h"
+#include "dex/signature-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "gc/space/image_space.h"
@@ -492,7 +493,7 @@
CHECK_EQ(kRequestedImageBase, reinterpret_cast<uintptr_t>(image_begin));
}
for (size_t j = 0; j < dex->NumClassDefs(); ++j) {
- const DexFile::ClassDef& class_def = dex->GetClassDef(j);
+ const dex::ClassDef& class_def = dex->GetClassDef(j);
const char* descriptor = dex->GetClassDescriptor(class_def);
ObjPtr<mirror::Class> klass = class_linker_->FindSystemClass(soa.Self(), descriptor);
EXPECT_TRUE(klass != nullptr) << descriptor;
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index e4e4b13..1331fc3 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -1586,7 +1586,7 @@
// Check if the referenced class is in the image. Note that we want to check the referenced
// class rather than the declaring class to preserve the semantics, i.e. using a MethodId
// results in resolving the referenced class and that can for example throw OOME.
- const DexFile::MethodId& method_id = dex_file.GetMethodId(stored_index);
+ const dex::MethodId& method_id = dex_file.GetMethodId(stored_index);
if (method_id.class_idx_ != last_class_idx) {
last_class_idx = method_id.class_idx_;
last_class = class_linker->LookupResolvedType(last_class_idx, dex_cache, class_loader);
@@ -1612,7 +1612,7 @@
// Check if the referenced class is in the image. Note that we want to check the referenced
// class rather than the declaring class to preserve the semantics, i.e. using a FieldId
// results in resolving the referenced class and that can for example throw OOME.
- const DexFile::FieldId& field_id = dex_file.GetFieldId(stored_index);
+ const dex::FieldId& field_id = dex_file.GetFieldId(stored_index);
if (field_id.class_idx_ != last_class_idx) {
last_class_idx = field_id.class_idx_;
last_class = class_linker->LookupResolvedType(last_class_idx, dex_cache, class_loader);
@@ -1663,7 +1663,7 @@
// Check if the referenced class is in the image. Note that we want to check the referenced
// class rather than the declaring class to preserve the semantics, i.e. using a MethodId
// results in resolving the referenced class and that can for example throw OOME.
- const DexFile::MethodId& method_id = dex_file.GetMethodId(i);
+ const dex::MethodId& method_id = dex_file.GetMethodId(i);
if (method_id.class_idx_ != last_class_idx) {
last_class_idx = method_id.class_idx_;
last_class = class_linker->LookupResolvedType(last_class_idx, dex_cache, class_loader);
@@ -1695,7 +1695,7 @@
// Check if the referenced class is in the image. Note that we want to check the referenced
// class rather than the declaring class to preserve the semantics, i.e. using a FieldId
// results in resolving the referenced class and that can for example throw OOME.
- const DexFile::FieldId& field_id = dex_file.GetFieldId(i);
+ const dex::FieldId& field_id = dex_file.GetFieldId(i);
if (field_id.class_idx_ != last_class_idx) {
last_class_idx = field_id.class_idx_;
last_class = class_linker->LookupResolvedType(last_class_idx, dex_cache, class_loader);
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index e2a9ac2..be9a0cb 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -1011,7 +1011,7 @@
size_t class_def_index;
uint32_t access_flags;
- const DexFile::CodeItem* code_item;
+ const dex::CodeItem* code_item;
// A value of -1 denotes missing debug info
static constexpr size_t kDebugInfoIdxInvalid = static_cast<size_t>(-1);
@@ -1506,7 +1506,7 @@
return true;
}
ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(Thread::Current(), *dex_file);
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
+ const dex::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
ObjPtr<mirror::Class> klass =
class_linker_->LookupResolvedType(class_def.class_idx_, dex_cache, class_loader_);
if (klass != nullptr) {
@@ -1585,7 +1585,7 @@
// Check whether current class is image class
bool IsImageClass() {
- const DexFile::TypeId& type_id =
+ const dex::TypeId& type_id =
dex_file_->GetTypeId(dex_file_->GetClassDef(class_def_index_).class_idx_);
const char* class_descriptor = dex_file_->GetTypeDescriptor(type_id);
return writer_->GetCompilerOptions().IsImageClass(class_descriptor);
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 2b59342..c23524a 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -492,13 +492,13 @@
case DexFile::kDexAnnotationField:
case DexFile::kDexAnnotationEnum: {
const u4 field_idx = static_cast<u4>(readVarWidth(data, arg, false));
- const DexFile::FieldId& pFieldId = pDexFile->GetFieldId(field_idx);
+ const dex::FieldId& pFieldId = pDexFile->GetFieldId(field_idx);
fputs(pDexFile->StringDataByIdx(pFieldId.name_idx_), gOutFile);
break;
}
case DexFile::kDexAnnotationMethod: {
const u4 method_idx = static_cast<u4>(readVarWidth(data, arg, false));
- const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(method_idx);
+ const dex::MethodId& pMethodId = pDexFile->GetMethodId(method_idx);
fputs(pDexFile->StringDataByIdx(pMethodId.name_idx_), gOutFile);
break;
}
@@ -594,7 +594,7 @@
*/
static void dumpClassDef(const DexFile* pDexFile, int idx) {
// General class information.
- const DexFile::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
+ const dex::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
fprintf(gOutFile, "Class #%d header:\n", idx);
fprintf(gOutFile, "class_idx : %d\n", pClassDef.class_idx_.index_);
fprintf(gOutFile, "access_flags : %d (0x%04x)\n",
@@ -620,13 +620,13 @@
/**
* Dumps an annotation set item.
*/
-static void dumpAnnotationSetItem(const DexFile* pDexFile, const DexFile::AnnotationSetItem* set_item) {
+static void dumpAnnotationSetItem(const DexFile* pDexFile, const dex::AnnotationSetItem* set_item) {
if (set_item == nullptr || set_item->size_ == 0) {
fputs(" empty-annotation-set\n", gOutFile);
return;
}
for (u4 i = 0; i < set_item->size_; i++) {
- const DexFile::AnnotationItem* annotation = pDexFile->GetAnnotationItem(set_item, i);
+ const dex::AnnotationItem* annotation = pDexFile->GetAnnotationItem(set_item, i);
if (annotation == nullptr) {
continue;
}
@@ -648,18 +648,18 @@
* Dumps class annotations.
*/
static void dumpClassAnnotations(const DexFile* pDexFile, int idx) {
- const DexFile::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
- const DexFile::AnnotationsDirectoryItem* dir = pDexFile->GetAnnotationsDirectory(pClassDef);
+ const dex::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
+ const dex::AnnotationsDirectoryItem* dir = pDexFile->GetAnnotationsDirectory(pClassDef);
if (dir == nullptr) {
return; // none
}
fprintf(gOutFile, "Class #%d annotations:\n", idx);
- const DexFile::AnnotationSetItem* class_set_item = pDexFile->GetClassAnnotationSet(dir);
- const DexFile::FieldAnnotationsItem* fields = pDexFile->GetFieldAnnotations(dir);
- const DexFile::MethodAnnotationsItem* methods = pDexFile->GetMethodAnnotations(dir);
- const DexFile::ParameterAnnotationsItem* pars = pDexFile->GetParameterAnnotations(dir);
+ const dex::AnnotationSetItem* class_set_item = pDexFile->GetClassAnnotationSet(dir);
+ const dex::FieldAnnotationsItem* fields = pDexFile->GetFieldAnnotations(dir);
+ const dex::MethodAnnotationsItem* methods = pDexFile->GetMethodAnnotations(dir);
+ const dex::ParameterAnnotationsItem* pars = pDexFile->GetParameterAnnotations(dir);
// Annotations on the class itself.
if (class_set_item != nullptr) {
@@ -671,7 +671,7 @@
if (fields != nullptr) {
for (u4 i = 0; i < dir->fields_size_; i++) {
const u4 field_idx = fields[i].field_idx_;
- const DexFile::FieldId& pFieldId = pDexFile->GetFieldId(field_idx);
+ const dex::FieldId& pFieldId = pDexFile->GetFieldId(field_idx);
const char* field_name = pDexFile->StringDataByIdx(pFieldId.name_idx_);
fprintf(gOutFile, "Annotations on field #%u '%s'\n", field_idx, field_name);
dumpAnnotationSetItem(pDexFile, pDexFile->GetFieldAnnotationSetItem(fields[i]));
@@ -682,7 +682,7 @@
if (methods != nullptr) {
for (u4 i = 0; i < dir->methods_size_; i++) {
const u4 method_idx = methods[i].method_idx_;
- const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(method_idx);
+ const dex::MethodId& pMethodId = pDexFile->GetMethodId(method_idx);
const char* method_name = pDexFile->StringDataByIdx(pMethodId.name_idx_);
fprintf(gOutFile, "Annotations on method #%u '%s'\n", method_idx, method_name);
dumpAnnotationSetItem(pDexFile, pDexFile->GetMethodAnnotationSetItem(methods[i]));
@@ -693,10 +693,10 @@
if (pars != nullptr) {
for (u4 i = 0; i < dir->parameters_size_; i++) {
const u4 method_idx = pars[i].method_idx_;
- const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(method_idx);
+ const dex::MethodId& pMethodId = pDexFile->GetMethodId(method_idx);
const char* method_name = pDexFile->StringDataByIdx(pMethodId.name_idx_);
fprintf(gOutFile, "Annotations on method #%u '%s' parameters\n", method_idx, method_name);
- const DexFile::AnnotationSetRefList*
+ const dex::AnnotationSetRefList*
list = pDexFile->GetParameterAnnotationSetRefList(&pars[i]);
if (list != nullptr) {
for (u4 j = 0; j < list->size_; j++) {
@@ -713,7 +713,7 @@
/*
* Dumps an interface that a class declares to implement.
*/
-static void dumpInterface(const DexFile* pDexFile, const DexFile::TypeItem& pTypeItem, int i) {
+static void dumpInterface(const DexFile* pDexFile, const dex::TypeItem& pTypeItem, int i) {
const char* interfaceName = pDexFile->StringByTypeIdx(pTypeItem.type_idx_);
if (gOptions.outputFormat == OUTPUT_PLAIN) {
fprintf(gOutFile, " #%d : '%s'\n", i, interfaceName);
@@ -726,7 +726,7 @@
/*
* Dumps the catches table associated with the code.
*/
-static void dumpCatches(const DexFile* pDexFile, const DexFile::CodeItem* pCode) {
+static void dumpCatches(const DexFile* pDexFile, const dex::CodeItem* pCode) {
CodeItemDataAccessor accessor(*pDexFile, pCode);
const u4 triesSize = accessor.TriesSize();
@@ -738,7 +738,7 @@
// Dump all table entries.
fprintf(gOutFile, " catches : %d\n", triesSize);
- for (const DexFile::TryItem& try_item : accessor.TryItems()) {
+ for (const dex::TryItem& try_item : accessor.TryItems()) {
const u4 start = try_item.start_addr_;
const u4 end = start + try_item.insn_count_;
fprintf(gOutFile, " 0x%04x - 0x%04x\n", start, end);
@@ -826,7 +826,7 @@
break;
case Instruction::kIndexMethodRef:
if (index < pDexFile->GetHeader().method_ids_size_) {
- const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(index);
+ const dex::MethodId& pMethodId = pDexFile->GetMethodId(index);
const char* name = pDexFile->StringDataByIdx(pMethodId.name_idx_);
const Signature signature = pDexFile->GetMethodSignature(pMethodId);
const char* backDescriptor = pDexFile->StringByTypeIdx(pMethodId.class_idx_);
@@ -838,7 +838,7 @@
break;
case Instruction::kIndexFieldRef:
if (index < pDexFile->GetHeader().field_ids_size_) {
- const DexFile::FieldId& pFieldId = pDexFile->GetFieldId(index);
+ const dex::FieldId& pFieldId = pDexFile->GetFieldId(index);
const char* name = pDexFile->StringDataByIdx(pFieldId.name_idx_);
const char* typeDescriptor = pDexFile->StringByTypeIdx(pFieldId.type_idx_);
const char* backDescriptor = pDexFile->StringByTypeIdx(pFieldId.class_idx_);
@@ -859,7 +859,7 @@
std::string method("<method?>");
std::string proto("<proto?>");
if (index < pDexFile->GetHeader().method_ids_size_) {
- const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(index);
+ const dex::MethodId& pMethodId = pDexFile->GetMethodId(index);
const char* name = pDexFile->StringDataByIdx(pMethodId.name_idx_);
const Signature signature = pDexFile->GetMethodSignature(pMethodId);
const char* backDescriptor = pDexFile->StringByTypeIdx(pMethodId.class_idx_);
@@ -869,7 +869,7 @@
signature.ToString().c_str());
}
if (secondary_index < pDexFile->GetHeader().proto_ids_size_) {
- const DexFile::ProtoId& protoId = pDexFile->GetProtoId(dex::ProtoIndex(secondary_index));
+ const dex::ProtoId& protoId = pDexFile->GetProtoId(dex::ProtoIndex(secondary_index));
const Signature signature = pDexFile->GetProtoSignature(protoId);
proto = signature.ToString();
}
@@ -887,7 +887,7 @@
break;
case Instruction::kIndexProtoRef:
if (index < pDexFile->GetHeader().proto_ids_size_) {
- const DexFile::ProtoId& protoId = pDexFile->GetProtoId(dex::ProtoIndex(index));
+ const dex::ProtoId& protoId = pDexFile->GetProtoId(dex::ProtoIndex(index));
const Signature signature = pDexFile->GetProtoSignature(protoId);
const std::string& proto = signature.ToString();
outSize = snprintf(buf.get(), bufSize, "%s // proto@%0*x", proto.c_str(), width, index);
@@ -916,7 +916,7 @@
* Dumps a single instruction.
*/
static void dumpInstruction(const DexFile* pDexFile,
- const DexFile::CodeItem* pCode,
+ const dex::CodeItem* pCode,
u4 codeOffset, u4 insnIdx, u4 insnWidth,
const Instruction* pDecInsn) {
// Address of instruction (expressed as byte offset).
@@ -1129,8 +1129,8 @@
* Dumps a bytecode disassembly.
*/
static void dumpBytecodes(const DexFile* pDexFile, u4 idx,
- const DexFile::CodeItem* pCode, u4 codeOffset) {
- const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(idx);
+ const dex::CodeItem* pCode, u4 codeOffset) {
+ const dex::MethodId& pMethodId = pDexFile->GetMethodId(idx);
const char* name = pDexFile->StringDataByIdx(pMethodId.name_idx_);
const Signature signature = pDexFile->GetMethodSignature(pMethodId);
const char* backDescriptor = pDexFile->StringByTypeIdx(pMethodId.class_idx_);
@@ -1163,7 +1163,7 @@
* Dumps code of a method.
*/
static void dumpCode(const DexFile* pDexFile, u4 idx, u4 flags,
- const DexFile::CodeItem* pCode, u4 codeOffset) {
+ const dex::CodeItem* pCode, u4 codeOffset) {
CodeItemDebugInfoAccessor accessor(*pDexFile, pCode, idx);
fprintf(gOutFile, " registers : %d\n", accessor.RegistersSize());
@@ -1214,7 +1214,7 @@
}
const DexFile& dex_file = method.GetDexFile();
- const DexFile::MethodId& pMethodId = dex_file.GetMethodId(method.GetIndex());
+ const dex::MethodId& pMethodId = dex_file.GetMethodId(method.GetIndex());
const char* name = dex_file.StringDataByIdx(pMethodId.name_idx_);
const Signature signature = dex_file.GetMethodSignature(pMethodId);
char* typeDescriptor = strdup(signature.ToString().c_str());
@@ -1325,7 +1325,7 @@
}
const DexFile& dex_file = field.GetDexFile();
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field.GetIndex());
+ const dex::FieldId& field_id = dex_file.GetFieldId(field.GetIndex());
const char* name = dex_file.StringDataByIdx(field_id.name_idx_);
const char* typeDescriptor = dex_file.StringByTypeIdx(field_id.type_idx_);
const char* backDescriptor = dex_file.StringByTypeIdx(field_id.class_idx_);
@@ -1386,7 +1386,7 @@
* the value will be replaced with a newly-allocated string.
*/
static void dumpClass(const DexFile* pDexFile, int idx, char** pLastPackage) {
- const DexFile::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
+ const dex::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
// Omitting non-public class.
if (gOptions.exportsOnly && (pClassDef.access_flags_ & kAccPublic) == 0) {
@@ -1480,7 +1480,7 @@
}
// Interfaces.
- const DexFile::TypeList* pInterfaces = pDexFile->GetInterfacesList(pClassDef);
+ const dex::TypeList* pInterfaces = pDexFile->GetInterfacesList(pClassDef);
if (pInterfaces != nullptr) {
for (u4 i = 0; i < pInterfaces->Size(); i++) {
dumpInterface(pDexFile, pInterfaces->GetTypeItem(i), i);
@@ -1552,7 +1552,7 @@
}
static void dumpMethodHandle(const DexFile* pDexFile, u4 idx) {
- const DexFile::MethodHandleItem& mh = pDexFile->GetMethodHandle(idx);
+ const dex::MethodHandleItem& mh = pDexFile->GetMethodHandle(idx);
const char* type = nullptr;
bool is_instance = false;
bool is_invoke = false;
@@ -1609,12 +1609,12 @@
std::string member_type;
if (type != nullptr) {
if (is_invoke) {
- const DexFile::MethodId& method_id = pDexFile->GetMethodId(mh.field_or_method_idx_);
+ const dex::MethodId& method_id = pDexFile->GetMethodId(mh.field_or_method_idx_);
declaring_class = pDexFile->GetMethodDeclaringClassDescriptor(method_id);
member = pDexFile->GetMethodName(method_id);
member_type = pDexFile->GetMethodSignature(method_id).ToString();
} else {
- const DexFile::FieldId& field_id = pDexFile->GetFieldId(mh.field_or_method_idx_);
+ const dex::FieldId& field_id = pDexFile->GetFieldId(mh.field_or_method_idx_);
declaring_class = pDexFile->GetFieldDeclaringClassDescriptor(field_id);
member = pDexFile->GetFieldName(field_id);
member_type = pDexFile->GetFieldTypeDescriptor(field_id);
@@ -1646,7 +1646,7 @@
}
static void dumpCallSite(const DexFile* pDexFile, u4 idx) {
- const DexFile::CallSiteIdItem& call_site_id = pDexFile->GetCallSiteId(idx);
+ const dex::CallSiteIdItem& call_site_id = pDexFile->GetCallSiteId(idx);
CallSiteArrayValueIterator it(*pDexFile, call_site_id);
if (it.Size() < 3) {
LOG(ERROR) << "ERROR: Call site " << idx << " has too few values.";
@@ -1659,7 +1659,7 @@
const char* method_name = pDexFile->StringDataByIdx(method_name_idx);
it.Next();
dex::ProtoIndex method_type_idx = static_cast<dex::ProtoIndex>(it.GetJavaValue().i);
- const DexFile::ProtoId& method_type_id = pDexFile->GetProtoId(method_type_idx);
+ const dex::ProtoId& method_type_id = pDexFile->GetProtoId(method_type_idx);
std::string method_type = pDexFile->GetProtoSignature(method_type_id).ToString();
it.Next();
@@ -1717,7 +1717,7 @@
case EncodedArrayValueIterator::ValueType::kMethodType: {
type = "MethodType";
dex::ProtoIndex proto_idx = static_cast<dex::ProtoIndex>(it.GetJavaValue().i);
- const DexFile::ProtoId& proto_id = pDexFile->GetProtoId(proto_idx);
+ const dex::ProtoId& proto_id = pDexFile->GetProtoId(proto_idx);
value = pDexFile->GetProtoSignature(proto_id).ToString();
break;
}
@@ -1734,7 +1734,7 @@
case EncodedArrayValueIterator::ValueType::kType: {
type = "Class";
dex::TypeIndex type_idx = static_cast<dex::TypeIndex>(it.GetJavaValue().i);
- const DexFile::TypeId& type_id = pDexFile->GetTypeId(type_idx);
+ const dex::TypeId& type_id = pDexFile->GetTypeId(type_idx);
value = pDexFile->GetTypeDescriptor(type_id);
break;
}
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index 92e438c..f4195b2 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -152,21 +152,21 @@
void CreateCallSitesAndMethodHandles(const DexFile& dex_file);
- TypeList* CreateTypeList(const DexFile::TypeList* type_list, uint32_t offset);
+ TypeList* CreateTypeList(const dex::TypeList* type_list, uint32_t offset);
EncodedArrayItem* CreateEncodedArrayItem(const DexFile& dex_file,
const uint8_t* static_data,
uint32_t offset);
AnnotationItem* CreateAnnotationItem(const DexFile& dex_file,
- const DexFile::AnnotationItem* annotation);
+ const dex::AnnotationItem* annotation);
AnnotationSetItem* CreateAnnotationSetItem(const DexFile& dex_file,
- const DexFile::AnnotationSetItem* disk_annotations_item, uint32_t offset);
+ const dex::AnnotationSetItem* disk_annotations_item, uint32_t offset);
AnnotationsDirectoryItem* CreateAnnotationsDirectoryItem(const DexFile& dex_file,
- const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset);
+ const dex::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset);
CodeItem* DedupeOrCreateCodeItem(const DexFile& dex_file,
- const DexFile::CodeItem* disk_code_item,
+ const dex::CodeItem* disk_code_item,
uint32_t offset,
uint32_t dex_method_index);
- ClassData* CreateClassData(const DexFile& dex_file, const DexFile::ClassDef& class_def);
+ ClassData* CreateClassData(const DexFile& dex_file, const dex::ClassDef& class_def);
void AddAnnotationsFromMapListSection(const DexFile& dex_file,
uint32_t start_offset,
@@ -207,7 +207,7 @@
ParameterAnnotation* GenerateParameterAnnotation(
const DexFile& dex_file,
MethodId* method_id,
- const DexFile::AnnotationSetRefList* annotation_set_ref_list,
+ const dex::AnnotationSetRefList* annotation_set_ref_list,
uint32_t offset);
template <typename Type, class... Args>
@@ -300,7 +300,7 @@
if (!options.class_filter_.empty()) {
// If the filter is enabled (not empty), filter out classes that don't have a matching
// descriptor.
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(i);
const char* descriptor = dex_file.GetClassDescriptor(class_def);
if (options.class_filter_.find(descriptor) == options.class_filter_.end()) {
continue;
@@ -331,10 +331,10 @@
void BuilderMaps::CheckAndSetRemainingOffsets(const DexFile& dex_file, const Options& options) {
const DexFile::Header& disk_header = dex_file.GetHeader();
// Read MapItems and validate/set remaining offsets.
- const DexFile::MapList* map = dex_file.GetMapList();
+ const dex::MapList* map = dex_file.GetMapList();
const uint32_t count = map->size_;
for (uint32_t i = 0; i < count; ++i) {
- const DexFile::MapItem* item = map->list_ + i;
+ const dex::MapItem* item = map->list_ + i;
switch (item->type_) {
case DexFile::kDexTypeHeaderItem:
CHECK_EQ(item->size_, 1u);
@@ -421,7 +421,7 @@
}
void BuilderMaps::CreateStringId(const DexFile& dex_file, uint32_t i) {
- const DexFile::StringId& disk_string_id = dex_file.GetStringId(dex::StringIndex(i));
+ const dex::StringId& disk_string_id = dex_file.GetStringId(dex::StringIndex(i));
StringData* string_data =
string_datas_map_.CreateAndAddItem(header_->StringDatas(),
eagerly_assign_offsets_,
@@ -434,7 +434,7 @@
}
void BuilderMaps::CreateTypeId(const DexFile& dex_file, uint32_t i) {
- const DexFile::TypeId& disk_type_id = dex_file.GetTypeId(dex::TypeIndex(i));
+ const dex::TypeId& disk_type_id = dex_file.GetTypeId(dex::TypeIndex(i));
CreateAndAddIndexedItem(header_->TypeIds(),
header_->TypeIds().GetOffset() + i * TypeId::ItemSize(),
i,
@@ -442,8 +442,8 @@
}
void BuilderMaps::CreateProtoId(const DexFile& dex_file, uint32_t i) {
- const DexFile::ProtoId& disk_proto_id = dex_file.GetProtoId(dex::ProtoIndex(i));
- const DexFile::TypeList* type_list = dex_file.GetProtoParameters(disk_proto_id);
+ const dex::ProtoId& disk_proto_id = dex_file.GetProtoId(dex::ProtoIndex(i));
+ const dex::TypeList* type_list = dex_file.GetProtoParameters(disk_proto_id);
TypeList* parameter_type_list = CreateTypeList(type_list, disk_proto_id.parameters_off_);
CreateAndAddIndexedItem(header_->ProtoIds(),
@@ -455,7 +455,7 @@
}
void BuilderMaps::CreateFieldId(const DexFile& dex_file, uint32_t i) {
- const DexFile::FieldId& disk_field_id = dex_file.GetFieldId(i);
+ const dex::FieldId& disk_field_id = dex_file.GetFieldId(i);
CreateAndAddIndexedItem(header_->FieldIds(),
header_->FieldIds().GetOffset() + i * FieldId::ItemSize(),
i,
@@ -465,7 +465,7 @@
}
void BuilderMaps::CreateMethodId(const DexFile& dex_file, uint32_t i) {
- const DexFile::MethodId& disk_method_id = dex_file.GetMethodId(i);
+ const dex::MethodId& disk_method_id = dex_file.GetMethodId(i);
CreateAndAddIndexedItem(header_->MethodIds(),
header_->MethodIds().GetOffset() + i * MethodId::ItemSize(),
i,
@@ -475,19 +475,19 @@
}
void BuilderMaps::CreateClassDef(const DexFile& dex_file, uint32_t i) {
- const DexFile::ClassDef& disk_class_def = dex_file.GetClassDef(i);
+ const dex::ClassDef& disk_class_def = dex_file.GetClassDef(i);
const TypeId* class_type = header_->TypeIds()[disk_class_def.class_idx_.index_];
uint32_t access_flags = disk_class_def.access_flags_;
const TypeId* superclass = header_->GetTypeIdOrNullPtr(disk_class_def.superclass_idx_.index_);
- const DexFile::TypeList* type_list = dex_file.GetInterfacesList(disk_class_def);
+ const dex::TypeList* type_list = dex_file.GetInterfacesList(disk_class_def);
TypeList* interfaces_type_list = CreateTypeList(type_list, disk_class_def.interfaces_off_);
const StringId* source_file =
header_->GetStringIdOrNullPtr(disk_class_def.source_file_idx_.index_);
// Annotations.
AnnotationsDirectoryItem* annotations = nullptr;
- const DexFile::AnnotationsDirectoryItem* disk_annotations_directory_item =
+ const dex::AnnotationsDirectoryItem* disk_annotations_directory_item =
dex_file.GetAnnotationsDirectory(disk_class_def);
if (disk_annotations_directory_item != nullptr) {
annotations = CreateAnnotationsDirectoryItem(
@@ -512,7 +512,7 @@
}
void BuilderMaps::CreateCallSiteId(const DexFile& dex_file, uint32_t i) {
- const DexFile::CallSiteIdItem& disk_call_site_id = dex_file.GetCallSiteId(i);
+ const dex::CallSiteIdItem& disk_call_site_id = dex_file.GetCallSiteId(i);
const uint8_t* disk_call_item_ptr = dex_file.DataBegin() + disk_call_site_id.data_off_;
EncodedArrayItem* call_site_item =
CreateEncodedArrayItem(dex_file, disk_call_item_ptr, disk_call_site_id.data_off_);
@@ -524,7 +524,7 @@
}
void BuilderMaps::CreateMethodHandleItem(const DexFile& dex_file, uint32_t i) {
- const DexFile::MethodHandleItem& disk_method_handle = dex_file.GetMethodHandle(i);
+ const dex::MethodHandleItem& disk_method_handle = dex_file.GetMethodHandle(i);
uint16_t index = disk_method_handle.field_or_method_idx_;
DexFile::MethodHandleType type =
static_cast<DexFile::MethodHandleType>(disk_method_handle.method_handle_type_);
@@ -551,9 +551,9 @@
void BuilderMaps::CreateCallSitesAndMethodHandles(const DexFile& dex_file) {
// Iterate through the map list and set the offset of the CallSiteIds and MethodHandleItems.
- const DexFile::MapList* map = dex_file.GetMapList();
+ const dex::MapList* map = dex_file.GetMapList();
for (uint32_t i = 0; i < map->size_; ++i) {
- const DexFile::MapItem* item = map->list_ + i;
+ const dex::MapItem* item = map->list_ + i;
switch (item->type_) {
case DexFile::kDexTypeCallSiteIdItem:
header_->CallSiteIds().SetOffset(item->offset_);
@@ -575,7 +575,7 @@
}
}
-TypeList* BuilderMaps::CreateTypeList(const DexFile::TypeList* dex_type_list, uint32_t offset) {
+TypeList* BuilderMaps::CreateTypeList(const dex::TypeList* dex_type_list, uint32_t offset) {
if (dex_type_list == nullptr) {
return nullptr;
}
@@ -623,7 +623,7 @@
uint32_t current_offset = start_offset;
for (size_t i = 0; i < count; ++i) {
// Annotation that we didn't process already, add it to the set.
- const DexFile::AnnotationItem* annotation = dex_file.GetAnnotationItemAtOffset(current_offset);
+ const dex::AnnotationItem* annotation = dex_file.GetAnnotationItemAtOffset(current_offset);
AnnotationItem* annotation_item = CreateAnnotationItem(dex_file, annotation);
DCHECK(annotation_item != nullptr);
current_offset += annotation_item->GetSize();
@@ -632,7 +632,7 @@
void BuilderMaps::AddHiddenapiClassDataFromMapListSection(const DexFile& dex_file,
uint32_t offset) {
- const DexFile::HiddenapiClassData* hiddenapi_class_data =
+ const dex::HiddenapiClassData* hiddenapi_class_data =
dex_file.GetHiddenapiClassDataAtOffset(offset);
DCHECK(hiddenapi_class_data == dex_file.GetHiddenapiClassData());
@@ -669,7 +669,7 @@
}
AnnotationItem* BuilderMaps::CreateAnnotationItem(const DexFile& dex_file,
- const DexFile::AnnotationItem* annotation) {
+ const dex::AnnotationItem* annotation) {
const uint8_t* const start_data = reinterpret_cast<const uint8_t*>(annotation);
const uint32_t offset = start_data - dex_file.DataBegin();
AnnotationItem* annotation_item = annotation_items_map_.GetExistingObject(offset);
@@ -691,7 +691,7 @@
AnnotationSetItem* BuilderMaps::CreateAnnotationSetItem(const DexFile& dex_file,
- const DexFile::AnnotationSetItem* disk_annotations_item, uint32_t offset) {
+ const dex::AnnotationSetItem* disk_annotations_item, uint32_t offset) {
if (disk_annotations_item == nullptr || (disk_annotations_item->size_ == 0 && offset == 0)) {
return nullptr;
}
@@ -699,7 +699,7 @@
if (annotation_set_item == nullptr) {
std::vector<AnnotationItem*>* items = new std::vector<AnnotationItem*>();
for (uint32_t i = 0; i < disk_annotations_item->size_; ++i) {
- const DexFile::AnnotationItem* annotation =
+ const dex::AnnotationItem* annotation =
dex_file.GetAnnotationItem(disk_annotations_item, i);
if (annotation == nullptr) {
continue;
@@ -717,27 +717,27 @@
}
AnnotationsDirectoryItem* BuilderMaps::CreateAnnotationsDirectoryItem(const DexFile& dex_file,
- const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset) {
+ const dex::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset) {
AnnotationsDirectoryItem* annotations_directory_item =
annotations_directory_items_map_.GetExistingObject(offset);
if (annotations_directory_item != nullptr) {
return annotations_directory_item;
}
- const DexFile::AnnotationSetItem* class_set_item =
+ const dex::AnnotationSetItem* class_set_item =
dex_file.GetClassAnnotationSet(disk_annotations_item);
AnnotationSetItem* class_annotation = nullptr;
if (class_set_item != nullptr) {
uint32_t item_offset = disk_annotations_item->class_annotations_off_;
class_annotation = CreateAnnotationSetItem(dex_file, class_set_item, item_offset);
}
- const DexFile::FieldAnnotationsItem* fields =
+ const dex::FieldAnnotationsItem* fields =
dex_file.GetFieldAnnotations(disk_annotations_item);
FieldAnnotationVector* field_annotations = nullptr;
if (fields != nullptr) {
field_annotations = new FieldAnnotationVector();
for (uint32_t i = 0; i < disk_annotations_item->fields_size_; ++i) {
FieldId* field_id = header_->FieldIds()[fields[i].field_idx_];
- const DexFile::AnnotationSetItem* field_set_item =
+ const dex::AnnotationSetItem* field_set_item =
dex_file.GetFieldAnnotationSetItem(fields[i]);
uint32_t annotation_set_offset = fields[i].annotations_off_;
AnnotationSetItem* annotation_set_item =
@@ -746,14 +746,14 @@
field_id, annotation_set_item));
}
}
- const DexFile::MethodAnnotationsItem* methods =
+ const dex::MethodAnnotationsItem* methods =
dex_file.GetMethodAnnotations(disk_annotations_item);
MethodAnnotationVector* method_annotations = nullptr;
if (methods != nullptr) {
method_annotations = new MethodAnnotationVector();
for (uint32_t i = 0; i < disk_annotations_item->methods_size_; ++i) {
MethodId* method_id = header_->MethodIds()[methods[i].method_idx_];
- const DexFile::AnnotationSetItem* method_set_item =
+ const dex::AnnotationSetItem* method_set_item =
dex_file.GetMethodAnnotationSetItem(methods[i]);
uint32_t annotation_set_offset = methods[i].annotations_off_;
AnnotationSetItem* annotation_set_item =
@@ -762,14 +762,14 @@
method_id, annotation_set_item));
}
}
- const DexFile::ParameterAnnotationsItem* parameters =
+ const dex::ParameterAnnotationsItem* parameters =
dex_file.GetParameterAnnotations(disk_annotations_item);
ParameterAnnotationVector* parameter_annotations = nullptr;
if (parameters != nullptr) {
parameter_annotations = new ParameterAnnotationVector();
for (uint32_t i = 0; i < disk_annotations_item->parameters_size_; ++i) {
MethodId* method_id = header_->MethodIds()[parameters[i].method_idx_];
- const DexFile::AnnotationSetRefList* list =
+ const dex::AnnotationSetRefList* list =
dex_file.GetParameterAnnotationSetRefList(&parameters[i]);
parameter_annotations->push_back(std::unique_ptr<ParameterAnnotation>(
GenerateParameterAnnotation(dex_file, method_id, list, parameters[i].annotations_off_)));
@@ -786,7 +786,7 @@
}
CodeItem* BuilderMaps::DedupeOrCreateCodeItem(const DexFile& dex_file,
- const DexFile::CodeItem* disk_code_item,
+ const dex::CodeItem* disk_code_item,
uint32_t offset,
uint32_t dex_method_index) {
if (disk_code_item == nullptr) {
@@ -827,7 +827,7 @@
if (accessor.TriesSize() > 0) {
tries = new TryItemVector();
handler_list = new CatchHandlerVector();
- for (const DexFile::TryItem& disk_try_item : accessor.TryItems()) {
+ for (const dex::TryItem& disk_try_item : accessor.TryItems()) {
uint32_t start_addr = disk_try_item.start_addr_;
uint16_t insn_count = disk_try_item.insn_count_;
uint16_t handler_off = disk_try_item.handler_off_;
@@ -941,7 +941,7 @@
}
ClassData* BuilderMaps::CreateClassData(const DexFile& dex_file,
- const DexFile::ClassDef& class_def) {
+ const dex::ClassDef& class_def) {
// Read the fields and methods defined by the class, resolving the circular reference from those
// to classes by setting class at the same time.
const uint32_t offset = class_def.class_data_off_;
@@ -1225,7 +1225,7 @@
const ClassAccessor::Method& method) {
MethodId* method_id = header_->MethodIds()[method.GetIndex()];
uint32_t access_flags = method.GetAccessFlags();
- const DexFile::CodeItem* disk_code_item = method.GetCodeItem();
+ const dex::CodeItem* disk_code_item = method.GetCodeItem();
// Temporary hack to prevent incorrectly deduping code items if they have the same offset since
// they may have different debug info streams.
CodeItem* code_item = DedupeOrCreateCodeItem(dex_file,
@@ -1238,13 +1238,13 @@
ParameterAnnotation* BuilderMaps::GenerateParameterAnnotation(
const DexFile& dex_file,
MethodId* method_id,
- const DexFile::AnnotationSetRefList* annotation_set_ref_list,
+ const dex::AnnotationSetRefList* annotation_set_ref_list,
uint32_t offset) {
AnnotationSetRefList* set_ref_list = annotation_set_ref_lists_map_.GetExistingObject(offset);
if (set_ref_list == nullptr) {
std::vector<AnnotationSetItem*>* annotations = new std::vector<AnnotationSetItem*>();
for (uint32_t i = 0; i < annotation_set_ref_list->size_; ++i) {
- const DexFile::AnnotationSetItem* annotation_set_item =
+ const dex::AnnotationSetItem* annotation_set_item =
dex_file.GetSetRefItemItem(&annotation_set_ref_list->list_[i]);
uint32_t set_offset = annotation_set_ref_list->list_[i].annotations_off_;
annotations->push_back(CreateAnnotationSetItem(dex_file, annotation_set_item, set_offset));
diff --git a/dexlayout/dex_writer.cc b/dexlayout/dex_writer.cc
index ef6ccf9..143f5b0 100644
--- a/dexlayout/dex_writer.cc
+++ b/dexlayout/dex_writer.cc
@@ -535,10 +535,10 @@
dex_ir::CodeItem* code_item,
bool reserve_only) {
if (code_item->TriesSize() != 0) {
- stream->AlignTo(DexFile::TryItem::kAlignment);
+ stream->AlignTo(dex::TryItem::kAlignment);
// Write try items.
for (std::unique_ptr<const dex_ir::TryItem>& try_item : *code_item->Tries()) {
- DexFile::TryItem disk_try_item;
+ dex::TryItem disk_try_item;
if (!reserve_only) {
disk_try_item.start_addr_ = try_item->StartAddr();
disk_try_item.insn_count_ = try_item->InsnCount();
@@ -712,7 +712,7 @@
stream->Write(&map_list_size, sizeof(map_list_size));
while (!queue->empty()) {
const MapItem& item = queue->top();
- DexFile::MapItem map_item;
+ dex::MapItem map_item;
map_item.type_ = item.type_;
map_item.size_ = item.size_;
map_item.offset_ = item.offset_;
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index 6e006b7..535f789 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -182,7 +182,7 @@
std::string* error_msg);
void DumpCFG(const DexFile* dex_file, int idx);
- void DumpCFG(const DexFile* dex_file, uint32_t dex_method_idx, const DexFile::CodeItem* code);
+ void DumpCFG(const DexFile* dex_file, uint32_t dex_method_idx, const dex::CodeItem* code);
Options& options_;
ProfileCompilationInfo* info_;
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 54157d9..b68449e 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -687,7 +687,7 @@
// Change the dex instructions to make an opcode that spans past the end of the code item.
for (ClassAccessor accessor : dex->GetClasses()) {
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
- DexFile::CodeItem* item = const_cast<DexFile::CodeItem*>(method.GetCodeItem());
+ dex::CodeItem* item = const_cast<dex::CodeItem*>(method.GetCodeItem());
if (item != nullptr) {
CodeItemInstructionAccessor instructions(*dex, item);
if (instructions.begin() != instructions.end()) {
@@ -793,7 +793,7 @@
for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
EXPECT_GT(dex_file->NumClassDefs(), 1u);
for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ const dex::ClassDef& class_def = dex_file->GetClassDef(i);
LOG(INFO) << dex_file->GetClassDescriptor(class_def);
}
Options options;
@@ -828,7 +828,7 @@
ASSERT_EQ(output_dex_file->NumClassDefs(), options.class_filter_.size());
for (uint32_t i = 0; i < output_dex_file->NumClassDefs(); ++i) {
// Check that every class in the output dex file is in the filter.
- const DexFile::ClassDef& class_def = output_dex_file->GetClassDef(i);
+ const dex::ClassDef& class_def = output_dex_file->GetClassDef(i);
ASSERT_TRUE(options.class_filter_.find(output_dex_file->GetClassDescriptor(class_def)) !=
options.class_filter_.end());
}
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index bdf3ca6..dd32fae 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -84,7 +84,7 @@
*/
static void dumpMethod(const DexFile* pDexFile,
const char* fileName, u4 idx, u4 flags ATTRIBUTE_UNUSED,
- const DexFile::CodeItem* pCode, u4 codeOffset) {
+ const dex::CodeItem* pCode, u4 codeOffset) {
// Abstract and native methods don't get listed.
if (pCode == nullptr || codeOffset == 0) {
return;
@@ -92,7 +92,7 @@
CodeItemDebugInfoAccessor accessor(*pDexFile, pCode, idx);
// Method information.
- const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(idx);
+ const dex::MethodId& pMethodId = pDexFile->GetMethodId(idx);
const char* methodName = pDexFile->StringDataByIdx(pMethodId.name_idx_);
const char* classDescriptor = pDexFile->StringByTypeIdx(pMethodId.class_idx_);
std::unique_ptr<char[]> className(descriptorToDot(classDescriptor));
@@ -134,7 +134,7 @@
* Runs through all direct and virtual methods in the class.
*/
void dumpClass(const DexFile* pDexFile, u4 idx) {
- const DexFile::ClassDef& class_def = pDexFile->GetClassDef(idx);
+ const dex::ClassDef& class_def = pDexFile->GetClassDef(idx);
const char* fileName = nullptr;
if (class_def.source_file_idx_.IsValid()) {
diff --git a/libdexfile/Android.bp b/libdexfile/Android.bp
index a4f7e25..9c48aa2 100644
--- a/libdexfile/Android.bp
+++ b/libdexfile/Android.bp
@@ -32,6 +32,7 @@
"dex/dex_instruction.cc",
"dex/modifiers.cc",
"dex/primitive.cc",
+ "dex/signature.cc",
"dex/standard_dex_file.cc",
"dex/type_lookup_table.cc",
"dex/utf.cc",
diff --git a/libdexfile/dex/art_dex_file_loader_test.cc b/libdexfile/dex/art_dex_file_loader_test.cc
index f7a2062..f9516db 100644
--- a/libdexfile/dex/art_dex_file_loader_test.cc
+++ b/libdexfile/dex/art_dex_file_loader_test.cc
@@ -107,13 +107,13 @@
ASSERT_TRUE(raw.get() != nullptr);
EXPECT_EQ(3U, raw->NumClassDefs());
- const DexFile::ClassDef& c0 = raw->GetClassDef(0);
+ const dex::ClassDef& c0 = raw->GetClassDef(0);
EXPECT_STREQ("LNested$1;", raw->GetClassDescriptor(c0));
- const DexFile::ClassDef& c1 = raw->GetClassDef(1);
+ const dex::ClassDef& c1 = raw->GetClassDef(1);
EXPECT_STREQ("LNested$Inner;", raw->GetClassDescriptor(c1));
- const DexFile::ClassDef& c2 = raw->GetClassDef(2);
+ const dex::ClassDef& c2 = raw->GetClassDef(2);
EXPECT_STREQ("LNested;", raw->GetClassDescriptor(c2));
}
@@ -122,7 +122,7 @@
ASSERT_TRUE(raw.get() != nullptr);
EXPECT_EQ(1U, raw->NumClassDefs());
- const DexFile::ClassDef& class_def = raw->GetClassDef(0);
+ const dex::ClassDef& class_def = raw->GetClassDef(0);
ASSERT_STREQ("LGetMethodSignature;", raw->GetClassDescriptor(class_def));
ClassAccessor accessor(*raw, class_def);
@@ -133,7 +133,7 @@
// Check the signature for the static initializer.
{
ASSERT_EQ(1U, accessor.NumDirectMethods());
- const DexFile::MethodId& method_id = raw->GetMethodId(cur_method->GetIndex());
+ const dex::MethodId& method_id = raw->GetMethodId(cur_method->GetIndex());
const char* name = raw->StringDataByIdx(method_id.name_idx_);
ASSERT_STREQ("<init>", name);
std::string signature(raw->GetMethodSignature(method_id).ToString());
@@ -207,7 +207,7 @@
for (const Result& r : results) {
++cur_method;
ASSERT_TRUE(cur_method != methods.end());
- const DexFile::MethodId& method_id = raw->GetMethodId(cur_method->GetIndex());
+ const dex::MethodId& method_id = raw->GetMethodId(cur_method->GetIndex());
const char* name = raw->StringDataByIdx(method_id.name_idx_);
ASSERT_STREQ(r.name, name);
@@ -232,7 +232,7 @@
"D", "I", "J", nullptr };
for (size_t i = 0; strings[i] != nullptr; i++) {
const char* str = strings[i];
- const DexFile::StringId* str_id = raw->FindStringId(str);
+ const dex::StringId* str_id = raw->FindStringId(str);
const char* dex_str = raw->GetStringData(*str_id);
EXPECT_STREQ(dex_str, str);
}
@@ -241,10 +241,10 @@
TEST_F(ArtDexFileLoaderTest, FindTypeId) {
for (size_t i = 0; i < java_lang_dex_file_->NumTypeIds(); i++) {
const char* type_str = java_lang_dex_file_->StringByTypeIdx(dex::TypeIndex(i));
- const DexFile::StringId* type_str_id = java_lang_dex_file_->FindStringId(type_str);
+ const dex::StringId* type_str_id = java_lang_dex_file_->FindStringId(type_str);
ASSERT_TRUE(type_str_id != nullptr);
dex::StringIndex type_str_idx = java_lang_dex_file_->GetIndexForStringId(*type_str_id);
- const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(type_str_idx);
+ const dex::TypeId* type_id = java_lang_dex_file_->FindTypeId(type_str_idx);
ASSERT_EQ(type_id, java_lang_dex_file_->FindTypeId(type_str));
ASSERT_TRUE(type_id != nullptr);
EXPECT_EQ(java_lang_dex_file_->GetIndexForTypeId(*type_id).index_, i);
@@ -253,15 +253,15 @@
TEST_F(ArtDexFileLoaderTest, FindProtoId) {
for (size_t i = 0; i < java_lang_dex_file_->NumProtoIds(); i++) {
- const DexFile::ProtoId& to_find = java_lang_dex_file_->GetProtoId(dex::ProtoIndex(i));
- const DexFile::TypeList* to_find_tl = java_lang_dex_file_->GetProtoParameters(to_find);
+ const dex::ProtoId& to_find = java_lang_dex_file_->GetProtoId(dex::ProtoIndex(i));
+ const dex::TypeList* to_find_tl = java_lang_dex_file_->GetProtoParameters(to_find);
std::vector<dex::TypeIndex> to_find_types;
if (to_find_tl != nullptr) {
for (size_t j = 0; j < to_find_tl->Size(); j++) {
to_find_types.push_back(to_find_tl->GetTypeItem(j).type_idx_);
}
}
- const DexFile::ProtoId* found =
+ const dex::ProtoId* found =
java_lang_dex_file_->FindProtoId(to_find.return_type_idx_, to_find_types);
ASSERT_TRUE(found != nullptr);
EXPECT_EQ(java_lang_dex_file_->GetIndexForProtoId(*found), dex::ProtoIndex(i));
@@ -270,11 +270,11 @@
TEST_F(ArtDexFileLoaderTest, FindMethodId) {
for (size_t i = 0; i < java_lang_dex_file_->NumMethodIds(); i++) {
- const DexFile::MethodId& to_find = java_lang_dex_file_->GetMethodId(i);
- const DexFile::TypeId& klass = java_lang_dex_file_->GetTypeId(to_find.class_idx_);
- const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
- const DexFile::ProtoId& signature = java_lang_dex_file_->GetProtoId(to_find.proto_idx_);
- const DexFile::MethodId* found = java_lang_dex_file_->FindMethodId(klass, name, signature);
+ const dex::MethodId& to_find = java_lang_dex_file_->GetMethodId(i);
+ const dex::TypeId& klass = java_lang_dex_file_->GetTypeId(to_find.class_idx_);
+ const dex::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
+ const dex::ProtoId& signature = java_lang_dex_file_->GetProtoId(to_find.proto_idx_);
+ const dex::MethodId* found = java_lang_dex_file_->FindMethodId(klass, name, signature);
ASSERT_TRUE(found != nullptr) << "Didn't find method " << i << ": "
<< java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
<< java_lang_dex_file_->GetStringData(name)
@@ -285,11 +285,11 @@
TEST_F(ArtDexFileLoaderTest, FindFieldId) {
for (size_t i = 0; i < java_lang_dex_file_->NumFieldIds(); i++) {
- const DexFile::FieldId& to_find = java_lang_dex_file_->GetFieldId(i);
- const DexFile::TypeId& klass = java_lang_dex_file_->GetTypeId(to_find.class_idx_);
- const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
- const DexFile::TypeId& type = java_lang_dex_file_->GetTypeId(to_find.type_idx_);
- const DexFile::FieldId* found = java_lang_dex_file_->FindFieldId(klass, name, type);
+ const dex::FieldId& to_find = java_lang_dex_file_->GetFieldId(i);
+ const dex::TypeId& klass = java_lang_dex_file_->GetTypeId(to_find.class_idx_);
+ const dex::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
+ const dex::TypeId& type = java_lang_dex_file_->GetTypeId(to_find.type_idx_);
+ const dex::FieldId* found = java_lang_dex_file_->FindFieldId(klass, name, type);
ASSERT_TRUE(found != nullptr) << "Didn't find field " << i << ": "
<< java_lang_dex_file_->StringByTypeIdx(to_find.type_idx_) << " "
<< java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
diff --git a/libdexfile/dex/class_accessor-inl.h b/libdexfile/dex/class_accessor-inl.h
index 334b072..8562d05 100644
--- a/libdexfile/dex/class_accessor-inl.h
+++ b/libdexfile/dex/class_accessor-inl.h
@@ -24,6 +24,8 @@
#include "base/utils.h"
#include "class_iterator.h"
#include "code_item_accessors-inl.h"
+#include "dex_file.h"
+#include "method_reference.h"
namespace art {
@@ -31,7 +33,7 @@
: ClassAccessor(data.dex_file_, data.class_def_idx_) {}
inline ClassAccessor::ClassAccessor(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
+ const dex::ClassDef& class_def,
bool parse_hiddenapi_class_data)
: ClassAccessor(dex_file,
dex_file.GetClassData(class_def),
@@ -54,7 +56,7 @@
num_direct_methods_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u),
num_virtual_methods_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u) {
if (parse_hiddenapi_class_data && class_def_index != DexFile::kDexNoIndex32) {
- const DexFile::HiddenapiClassData* hiddenapi_class_data = dex_file.GetHiddenapiClassData();
+ const dex::HiddenapiClassData* hiddenapi_class_data = dex_file.GetHiddenapiClassData();
if (hiddenapi_class_data != nullptr) {
hiddenapi_ptr_pos_ = hiddenapi_class_data->GetFlagsPointer(class_def_index);
}
@@ -71,6 +73,11 @@
}
}
+inline MethodReference ClassAccessor::Method::GetReference() const {
+ return MethodReference(&dex_file_, GetIndex());
+}
+
+
inline void ClassAccessor::Field::Read() {
index_ += DecodeUnsignedLeb128(&ptr_pos_);
access_flags_ = DecodeUnsignedLeb128(&ptr_pos_);
@@ -131,7 +138,7 @@
VoidFunctor());
}
-inline const DexFile::CodeItem* ClassAccessor::GetCodeItem(const Method& method) const {
+inline const dex::CodeItem* ClassAccessor::GetCodeItem(const Method& method) const {
return dex_file_.GetCodeItem(method.GetCodeItemOffset());
}
@@ -147,7 +154,7 @@
return dex_file_.StringByTypeIdx(GetClassIdx());
}
-inline const DexFile::CodeItem* ClassAccessor::Method::GetCodeItem() const {
+inline const dex::CodeItem* ClassAccessor::Method::GetCodeItem() const {
return dex_file_.GetCodeItem(code_off_);
}
@@ -231,6 +238,10 @@
return dex_file_.GetClassDef(class_def_index_).class_idx_;
}
+inline const dex::ClassDef& ClassAccessor::GetClassDef() const {
+ return dex_file_.GetClassDef(GetClassDefIndex());
+}
+
} // namespace art
#endif // ART_LIBDEXFILE_DEX_CLASS_ACCESSOR_INL_H_
diff --git a/libdexfile/dex/class_accessor.h b/libdexfile/dex/class_accessor.h
index bd7b912..1628256 100644
--- a/libdexfile/dex/class_accessor.h
+++ b/libdexfile/dex/class_accessor.h
@@ -18,14 +18,21 @@
#define ART_LIBDEXFILE_DEX_CLASS_ACCESSOR_H_
#include "code_item_accessors.h"
-#include "dex_file.h"
+#include "dex_file_types.h"
#include "invoke_type.h"
-#include "method_reference.h"
#include "modifiers.h"
namespace art {
+namespace dex {
+struct ClassDef;
+struct CodeItem;
+} // namespace dex
+
class ClassIteratorData;
+class DexFile;
+template <typename Iter> class IterationRange;
+class MethodReference;
// Classes to access Dex data.
class ClassAccessor {
@@ -92,14 +99,12 @@
: GetVirtualMethodInvokeType(class_access_flags);
}
- MethodReference GetReference() const {
- return MethodReference(&dex_file_, GetIndex());
- }
+ MethodReference GetReference() const;
CodeItemInstructionAccessor GetInstructions() const;
CodeItemDataAccessor GetInstructionsAndData() const;
- const DexFile::CodeItem* GetCodeItem() const;
+ const dex::CodeItem* GetCodeItem() const;
bool IsStaticOrDirect() const {
return is_static_or_direct_;
@@ -266,18 +271,18 @@
ALWAYS_INLINE ClassAccessor(const ClassIteratorData& data); // NOLINT [runtime/explicit] [5]
ALWAYS_INLINE ClassAccessor(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
+ const dex::ClassDef& class_def,
bool parse_hiddenapi_class_data = false);
ALWAYS_INLINE ClassAccessor(const DexFile& dex_file, uint32_t class_def_index);
ClassAccessor(const DexFile& dex_file,
const uint8_t* class_data,
- uint32_t class_def_index = DexFile::kDexNoIndex32,
+ uint32_t class_def_index = dex::kDexNoIndex,
bool parse_hiddenapi_class_data = false);
// Return the code item for a method.
- const DexFile::CodeItem* GetCodeItem(const Method& method) const;
+ const dex::CodeItem* GetCodeItem(const Method& method) const;
// Iterator data is not very iterator friendly, use visitors to get around this.
template <typename StaticFieldVisitor,
@@ -361,9 +366,7 @@
return class_def_index_;
}
- const DexFile::ClassDef& GetClassDef() const {
- return dex_file_.GetClassDef(GetClassDefIndex());
- }
+ const dex::ClassDef& GetClassDef() const;
protected:
// Template visitor to reduce copy paste for visiting elements.
diff --git a/libdexfile/dex/class_accessor_test.cc b/libdexfile/dex/class_accessor_test.cc
index 1f30ae5..9f2ee23 100644
--- a/libdexfile/dex/class_accessor_test.cc
+++ b/libdexfile/dex/class_accessor_test.cc
@@ -30,7 +30,7 @@
uint32_t class_def_idx = 0u;
ASSERT_GT(dex_file->NumClassDefs(), 0u);
for (ClassAccessor accessor : dex_file->GetClasses()) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(accessor.GetClassDefIndex());
+ const dex::ClassDef& class_def = dex_file->GetClassDef(accessor.GetClassDefIndex());
EXPECT_EQ(accessor.GetDescriptor(), dex_file->StringByTypeIdx(class_def.class_idx_));
EXPECT_EQ(class_def_idx, accessor.GetClassDefIndex());
++class_def_idx;
diff --git a/libdexfile/dex/code_item_accessors-inl.h b/libdexfile/dex/code_item_accessors-inl.h
index bbf2224..632a787 100644
--- a/libdexfile/dex/code_item_accessors-inl.h
+++ b/libdexfile/dex/code_item_accessors-inl.h
@@ -19,6 +19,7 @@
#include "code_item_accessors.h"
+#include "base/iteration_range.h"
#include "compact_dex_file.h"
#include "dex_file-inl.h"
#include "standard_dex_file.h"
@@ -32,7 +33,9 @@
insns_ = insns;
}
-inline void CodeItemInstructionAccessor::Init(const CompactDexFile::CodeItem& code_item) {
+template <>
+inline void CodeItemInstructionAccessor::Init<CompactDexFile::CodeItem>(
+ const CompactDexFile::CodeItem& code_item) {
uint32_t insns_size_in_code_units;
code_item.DecodeFields</*kDecodeOnlyInstructionCount*/ true>(
&insns_size_in_code_units,
@@ -43,12 +46,14 @@
Init(insns_size_in_code_units, code_item.insns_);
}
-inline void CodeItemInstructionAccessor::Init(const StandardDexFile::CodeItem& code_item) {
+template <>
+inline void CodeItemInstructionAccessor::Init<StandardDexFile::CodeItem>(
+ const StandardDexFile::CodeItem& code_item) {
Init(code_item.insns_size_in_code_units_, code_item.insns_);
}
inline void CodeItemInstructionAccessor::Init(const DexFile& dex_file,
- const DexFile::CodeItem* code_item) {
+ const dex::CodeItem* code_item) {
if (code_item != nullptr) {
DCHECK(dex_file.IsInDataSection(code_item));
if (dex_file.IsCompactDexFile()) {
@@ -62,7 +67,7 @@
inline CodeItemInstructionAccessor::CodeItemInstructionAccessor(
const DexFile& dex_file,
- const DexFile::CodeItem* code_item) {
+ const dex::CodeItem* code_item) {
Init(dex_file, code_item);
}
@@ -82,7 +87,9 @@
DexInstructionIterator(insns_, insns_size_in_code_units_) };
}
-inline void CodeItemDataAccessor::Init(const CompactDexFile::CodeItem& code_item) {
+template <>
+inline void CodeItemDataAccessor::Init<CompactDexFile::CodeItem>(
+ const CompactDexFile::CodeItem& code_item) {
uint32_t insns_size_in_code_units;
code_item.DecodeFields</*kDecodeOnlyInstructionCount*/ false>(&insns_size_in_code_units,
&registers_size_,
@@ -92,7 +99,9 @@
CodeItemInstructionAccessor::Init(insns_size_in_code_units, code_item.insns_);
}
-inline void CodeItemDataAccessor::Init(const StandardDexFile::CodeItem& code_item) {
+template <>
+inline void CodeItemDataAccessor::Init<StandardDexFile::CodeItem>(
+ const StandardDexFile::CodeItem& code_item) {
CodeItemInstructionAccessor::Init(code_item);
registers_size_ = code_item.registers_size_;
ins_size_ = code_item.ins_size_;
@@ -101,24 +110,24 @@
}
inline void CodeItemDataAccessor::Init(const DexFile& dex_file,
- const DexFile::CodeItem* code_item) {
+ const dex::CodeItem* code_item) {
if (code_item != nullptr) {
if (dex_file.IsCompactDexFile()) {
- CodeItemDataAccessor::Init(down_cast<const CompactDexFile::CodeItem&>(*code_item));
+ Init(down_cast<const CompactDexFile::CodeItem&>(*code_item));
} else {
DCHECK(dex_file.IsStandardDexFile());
- CodeItemDataAccessor::Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
+ Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
}
}
}
inline CodeItemDataAccessor::CodeItemDataAccessor(const DexFile& dex_file,
- const DexFile::CodeItem* code_item) {
+ const dex::CodeItem* code_item) {
Init(dex_file, code_item);
}
-inline IterationRange<const DexFile::TryItem*> CodeItemDataAccessor::TryItems() const {
- const DexFile::TryItem* try_items = DexFile::GetTryItems(end(), 0u);
+inline IterationRange<const dex::TryItem*> CodeItemDataAccessor::TryItems() const {
+ const dex::TryItem* try_items = DexFile::GetTryItems(end(), 0u);
return {
try_items,
try_items + TriesSize() };
@@ -128,8 +137,8 @@
return DexFile::GetCatchHandlerData(end(), TriesSize(), offset);
}
-inline const DexFile::TryItem* CodeItemDataAccessor::FindTryItem(uint32_t try_dex_pc) const {
- IterationRange<const DexFile::TryItem*> try_items(TryItems());
+inline const dex::TryItem* CodeItemDataAccessor::FindTryItem(uint32_t try_dex_pc) const {
+ IterationRange<const dex::TryItem*> try_items(TryItems());
int32_t index = DexFile::FindTryItem(try_items.begin(),
try_items.end() - try_items.begin(),
try_dex_pc);
@@ -157,8 +166,25 @@
return reinterpret_cast<const void*>(handler_data);
}
+template <>
+inline void CodeItemDebugInfoAccessor::Init<CompactDexFile::CodeItem>(
+ const CompactDexFile::CodeItem& code_item,
+ uint32_t dex_method_index) {
+ debug_info_offset_ = down_cast<const CompactDexFile*>(dex_file_)->GetDebugInfoOffset(
+ dex_method_index);
+ CodeItemDataAccessor::Init(code_item);
+}
+
+template <>
+inline void CodeItemDebugInfoAccessor::Init<StandardDexFile::CodeItem>(
+ const StandardDexFile::CodeItem& code_item,
+ uint32_t dex_method_index ATTRIBUTE_UNUSED) {
+ debug_info_offset_ = code_item.debug_info_off_;
+ CodeItemDataAccessor::Init(code_item);
+}
+
inline void CodeItemDebugInfoAccessor::Init(const DexFile& dex_file,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t dex_method_index) {
if (code_item == nullptr) {
return;
@@ -168,22 +194,10 @@
Init(down_cast<const CompactDexFile::CodeItem&>(*code_item), dex_method_index);
} else {
DCHECK(dex_file.IsStandardDexFile());
- Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
+ Init(down_cast<const StandardDexFile::CodeItem&>(*code_item), dex_method_index);
}
}
-inline void CodeItemDebugInfoAccessor::Init(const CompactDexFile::CodeItem& code_item,
- uint32_t dex_method_index) {
- debug_info_offset_ = down_cast<const CompactDexFile*>(dex_file_)->GetDebugInfoOffset(
- dex_method_index);
- CodeItemDataAccessor::Init(code_item);
-}
-
-inline void CodeItemDebugInfoAccessor::Init(const StandardDexFile::CodeItem& code_item) {
- debug_info_offset_ = code_item.debug_info_off_;
- CodeItemDataAccessor::Init(code_item);
-}
-
template<typename NewLocalVisitor>
inline bool CodeItemDebugInfoAccessor::DecodeDebugLocalInfo(
bool is_static,
diff --git a/libdexfile/dex/code_item_accessors.h b/libdexfile/dex/code_item_accessors.h
index c307c9f..794f234 100644
--- a/libdexfile/dex/code_item_accessors.h
+++ b/libdexfile/dex/code_item_accessors.h
@@ -19,21 +19,28 @@
#ifndef ART_LIBDEXFILE_DEX_CODE_ITEM_ACCESSORS_H_
#define ART_LIBDEXFILE_DEX_CODE_ITEM_ACCESSORS_H_
-#include "compact_dex_file.h"
-#include "dex_file.h"
+#include <android-base/logging.h>
+
#include "dex_instruction_iterator.h"
-#include "standard_dex_file.h"
namespace art {
+namespace dex {
+struct CodeItem;
+struct TryItem;
+} // namespace dex
+
class ArtMethod;
+class DexFile;
+template <typename Iter>
+class IterationRange;
// Abstracts accesses to the instruction fields of code items for CompactDexFile and
// StandardDexFile.
class CodeItemInstructionAccessor {
public:
ALWAYS_INLINE CodeItemInstructionAccessor(const DexFile& dex_file,
- const DexFile::CodeItem* code_item);
+ const dex::CodeItem* code_item);
ALWAYS_INLINE explicit CodeItemInstructionAccessor(ArtMethod* method);
@@ -71,9 +78,10 @@
CodeItemInstructionAccessor() = default;
ALWAYS_INLINE void Init(uint32_t insns_size_in_code_units, const uint16_t* insns);
- ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item);
- ALWAYS_INLINE void Init(const StandardDexFile::CodeItem& code_item);
- ALWAYS_INLINE void Init(const DexFile& dex_file, const DexFile::CodeItem* code_item);
+ ALWAYS_INLINE void Init(const DexFile& dex_file, const dex::CodeItem* code_item);
+
+ template <typename DexFileCodeItemType>
+ ALWAYS_INLINE void Init(const DexFileCodeItemType& code_item);
private:
// size of the insns array, in 2 byte code units. 0 if there is no code item.
@@ -87,7 +95,7 @@
// StandardDexFile.
class CodeItemDataAccessor : public CodeItemInstructionAccessor {
public:
- ALWAYS_INLINE CodeItemDataAccessor(const DexFile& dex_file, const DexFile::CodeItem* code_item);
+ ALWAYS_INLINE CodeItemDataAccessor(const DexFile& dex_file, const dex::CodeItem* code_item);
uint16_t RegistersSize() const {
return registers_size_;
@@ -105,20 +113,21 @@
return tries_size_;
}
- IterationRange<const DexFile::TryItem*> TryItems() const;
+ IterationRange<const dex::TryItem*> TryItems() const;
const uint8_t* GetCatchHandlerData(size_t offset = 0) const;
- const DexFile::TryItem* FindTryItem(uint32_t try_dex_pc) const;
+ const dex::TryItem* FindTryItem(uint32_t try_dex_pc) const;
inline const void* CodeItemDataEnd() const;
protected:
CodeItemDataAccessor() = default;
- ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item);
- ALWAYS_INLINE void Init(const StandardDexFile::CodeItem& code_item);
- ALWAYS_INLINE void Init(const DexFile& dex_file, const DexFile::CodeItem* code_item);
+ ALWAYS_INLINE void Init(const DexFile& dex_file, const dex::CodeItem* code_item);
+
+ template <typename DexFileCodeItemType>
+ ALWAYS_INLINE void Init(const DexFileCodeItemType& code_item);
private:
// Fields mirrored from the dex/cdex code item.
@@ -136,13 +145,13 @@
// Initialize with an existing offset.
ALWAYS_INLINE CodeItemDebugInfoAccessor(const DexFile& dex_file,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t dex_method_index) {
Init(dex_file, code_item, dex_method_index);
}
ALWAYS_INLINE void Init(const DexFile& dex_file,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t dex_method_index);
ALWAYS_INLINE explicit CodeItemDebugInfoAccessor(ArtMethod* method);
@@ -167,8 +176,8 @@
bool GetLineNumForPc(const uint32_t pc, uint32_t* line_num) const;
protected:
- ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item, uint32_t dex_method_index);
- ALWAYS_INLINE void Init(const StandardDexFile::CodeItem& code_item);
+ template <typename DexFileCodeItemType>
+ ALWAYS_INLINE void Init(const DexFileCodeItemType& code_item, uint32_t dex_method_index);
private:
const DexFile* dex_file_ = nullptr;
diff --git a/libdexfile/dex/code_item_accessors_test.cc b/libdexfile/dex/code_item_accessors_test.cc
index 87f4bab..c5891f9 100644
--- a/libdexfile/dex/code_item_accessors_test.cc
+++ b/libdexfile/dex/code_item_accessors_test.cc
@@ -71,7 +71,7 @@
static constexpr size_t kInsnsSizeInCodeUnits = 5;
auto verify_code_item = [&](const DexFile* dex,
- const DexFile::CodeItem* item,
+ const dex::CodeItem* item,
const uint16_t* insns) {
CodeItemInstructionAccessor insns_accessor(*dex, item);
EXPECT_TRUE(insns_accessor.HasCodeItem());
diff --git a/libdexfile/dex/compact_dex_file.cc b/libdexfile/dex/compact_dex_file.cc
index 641c523..a5044aa 100644
--- a/libdexfile/dex/compact_dex_file.cc
+++ b/libdexfile/dex/compact_dex_file.cc
@@ -55,7 +55,7 @@
static_cast<uint32_t>(FeatureFlags::kDefaultMethods)) != 0;
}
-uint32_t CompactDexFile::GetCodeItemSize(const DexFile::CodeItem& item) const {
+uint32_t CompactDexFile::GetCodeItemSize(const dex::CodeItem& item) const {
DCHECK(IsInDataSection(&item));
return reinterpret_cast<uintptr_t>(CodeItemDataAccessor(*this, &item).CodeItemDataEnd()) -
reinterpret_cast<uintptr_t>(&item);
diff --git a/libdexfile/dex/compact_dex_file.h b/libdexfile/dex/compact_dex_file.h
index 8eade6d..47edd51 100644
--- a/libdexfile/dex/compact_dex_file.h
+++ b/libdexfile/dex/compact_dex_file.h
@@ -84,7 +84,7 @@
// Like the standard code item except without a debug info offset. Each code item may have a
// preheader to encode large methods. In 99% of cases, the preheader is not used. This enables
// smaller size with a good fast path case in the accessors.
- struct CodeItem : public DexFile::CodeItem {
+ struct CodeItem : public dex::CodeItem {
static constexpr size_t kAlignment = sizeof(uint16_t);
// Max preheader size in uint16_ts.
static constexpr size_t kMaxPreHeaderSize = 6;
@@ -271,7 +271,7 @@
bool SupportsDefaultMethods() const override;
- uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const override;
+ uint32_t GetCodeItemSize(const dex::CodeItem& item) const override;
uint32_t GetDebugInfoOffset(uint32_t dex_method_index) const {
return debug_info_offsets_.GetOffset(dex_method_index);
diff --git a/libdexfile/dex/dex_file-inl.h b/libdexfile/dex/dex_file-inl.h
index c884eee..2af1e04 100644
--- a/libdexfile/dex/dex_file-inl.h
+++ b/libdexfile/dex/dex_file-inl.h
@@ -20,6 +20,7 @@
#include "dex_file.h"
#include "base/casts.h"
+#include "base/iteration_range.h"
#include "base/leb128.h"
#include "base/stringpiece.h"
#include "base/utils.h"
@@ -31,12 +32,12 @@
namespace art {
-inline int32_t DexFile::GetStringLength(const StringId& string_id) const {
+inline int32_t DexFile::GetStringLength(const dex::StringId& string_id) const {
const uint8_t* ptr = DataBegin() + string_id.string_data_off_;
return DecodeUnsignedLeb128(&ptr);
}
-inline const char* DexFile::GetStringDataAndUtf16Length(const StringId& string_id,
+inline const char* DexFile::GetStringDataAndUtf16Length(const dex::StringId& string_id,
uint32_t* utf16_length) const {
DCHECK(utf16_length != nullptr) << GetLocation();
const uint8_t* ptr = DataBegin() + string_id.string_data_off_;
@@ -44,7 +45,7 @@
return reinterpret_cast<const char*>(ptr);
}
-inline const char* DexFile::GetStringData(const StringId& string_id) const {
+inline const char* DexFile::GetStringData(const dex::StringId& string_id) const {
uint32_t ignored;
return GetStringDataAndUtf16Length(string_id, &ignored);
}
@@ -55,7 +56,7 @@
*utf16_length = 0;
return nullptr;
}
- const StringId& string_id = GetStringId(idx);
+ const dex::StringId& string_id = GetStringId(idx);
return GetStringDataAndUtf16Length(string_id, utf16_length);
}
@@ -68,7 +69,7 @@
if (!idx.IsValid()) {
return nullptr;
}
- const TypeId& type_id = GetTypeId(idx);
+ const dex::TypeId& type_id = GetTypeId(idx);
return StringDataAndUtf16LengthByIdx(type_id.descriptor_idx_, unicode_length);
}
@@ -76,41 +77,43 @@
if (!idx.IsValid()) {
return nullptr;
}
- const TypeId& type_id = GetTypeId(idx);
+ const dex::TypeId& type_id = GetTypeId(idx);
return StringDataByIdx(type_id.descriptor_idx_);
}
-inline const char* DexFile::GetTypeDescriptor(const TypeId& type_id) const {
+inline const char* DexFile::GetTypeDescriptor(const dex::TypeId& type_id) const {
return StringDataByIdx(type_id.descriptor_idx_);
}
-inline const char* DexFile::GetFieldTypeDescriptor(const FieldId& field_id) const {
- const DexFile::TypeId& type_id = GetTypeId(field_id.type_idx_);
+inline const char* DexFile::GetFieldTypeDescriptor(const dex::FieldId& field_id) const {
+ const dex::TypeId& type_id = GetTypeId(field_id.type_idx_);
return GetTypeDescriptor(type_id);
}
-inline const char* DexFile::GetFieldName(const FieldId& field_id) const {
+inline const char* DexFile::GetFieldName(const dex::FieldId& field_id) const {
return StringDataByIdx(field_id.name_idx_);
}
-inline const char* DexFile::GetMethodDeclaringClassDescriptor(const MethodId& method_id) const {
- const DexFile::TypeId& type_id = GetTypeId(method_id.class_idx_);
+inline const char* DexFile::GetMethodDeclaringClassDescriptor(const dex::MethodId& method_id)
+ const {
+ const dex::TypeId& type_id = GetTypeId(method_id.class_idx_);
return GetTypeDescriptor(type_id);
}
-inline const Signature DexFile::GetMethodSignature(const MethodId& method_id) const {
+inline const Signature DexFile::GetMethodSignature(const dex::MethodId& method_id) const {
return Signature(this, GetProtoId(method_id.proto_idx_));
}
-inline const Signature DexFile::GetProtoSignature(const ProtoId& proto_id) const {
+inline const Signature DexFile::GetProtoSignature(const dex::ProtoId& proto_id) const {
return Signature(this, proto_id);
}
-inline const char* DexFile::GetMethodName(const MethodId& method_id) const {
+inline const char* DexFile::GetMethodName(const dex::MethodId& method_id) const {
return StringDataByIdx(method_id.name_idx_);
}
-inline const char* DexFile::GetMethodName(const MethodId& method_id, uint32_t* utf_length) const {
+inline const char* DexFile::GetMethodName(const dex::MethodId& method_id, uint32_t* utf_length)
+ const {
return StringDataAndUtf16LengthByIdx(method_id.name_idx_, utf_length);
}
@@ -122,36 +125,38 @@
return StringDataByIdx(GetProtoId(GetMethodId(idx).proto_idx_).shorty_idx_);
}
-inline const char* DexFile::GetMethodShorty(const MethodId& method_id) const {
+inline const char* DexFile::GetMethodShorty(const dex::MethodId& method_id) const {
return StringDataByIdx(GetProtoId(method_id.proto_idx_).shorty_idx_);
}
-inline const char* DexFile::GetMethodShorty(const MethodId& method_id, uint32_t* length) const {
+inline const char* DexFile::GetMethodShorty(const dex::MethodId& method_id, uint32_t* length)
+ const {
// Using the UTF16 length is safe here as shorties are guaranteed to be ASCII characters.
return StringDataAndUtf16LengthByIdx(GetProtoId(method_id.proto_idx_).shorty_idx_, length);
}
-inline const char* DexFile::GetClassDescriptor(const ClassDef& class_def) const {
+inline const char* DexFile::GetClassDescriptor(const dex::ClassDef& class_def) const {
return StringByTypeIdx(class_def.class_idx_);
}
-inline const char* DexFile::GetReturnTypeDescriptor(const ProtoId& proto_id) const {
+inline const char* DexFile::GetReturnTypeDescriptor(const dex::ProtoId& proto_id) const {
return StringByTypeIdx(proto_id.return_type_idx_);
}
inline const char* DexFile::GetShorty(dex::ProtoIndex proto_idx) const {
- const ProtoId& proto_id = GetProtoId(proto_idx);
+ const dex::ProtoId& proto_id = GetProtoId(proto_idx);
return StringDataByIdx(proto_id.shorty_idx_);
}
-inline const DexFile::TryItem* DexFile::GetTryItems(const DexInstructionIterator& code_item_end,
- uint32_t offset) {
- return reinterpret_cast<const TryItem*>
- (RoundUp(reinterpret_cast<uintptr_t>(&code_item_end.Inst()), TryItem::kAlignment)) + offset;
+inline const dex::TryItem* DexFile::GetTryItems(const DexInstructionIterator& code_item_end,
+ uint32_t offset) {
+ return reinterpret_cast<const dex::TryItem*>
+ (RoundUp(reinterpret_cast<uintptr_t>(&code_item_end.Inst()), dex::TryItem::kAlignment)) +
+ offset;
}
-static inline bool DexFileStringEquals(const DexFile* df1, dex::StringIndex sidx1,
- const DexFile* df2, dex::StringIndex sidx2) {
+inline bool DexFile::StringEquals(const DexFile* df1, dex::StringIndex sidx1,
+ const DexFile* df2, dex::StringIndex sidx2) {
uint32_t s1_len; // Note: utf16 length != mutf8 length.
const char* s1_data = df1->StringDataAndUtf16LengthByIdx(sidx1, &s1_len);
uint32_t s2_len;
@@ -159,60 +164,6 @@
return (s1_len == s2_len) && (strcmp(s1_data, s2_data) == 0);
}
-inline bool Signature::operator==(const Signature& rhs) const {
- if (dex_file_ == nullptr) {
- return rhs.dex_file_ == nullptr;
- }
- if (rhs.dex_file_ == nullptr) {
- return false;
- }
- if (dex_file_ == rhs.dex_file_) {
- return proto_id_ == rhs.proto_id_;
- }
- uint32_t lhs_shorty_len; // For a shorty utf16 length == mutf8 length.
- const char* lhs_shorty_data = dex_file_->StringDataAndUtf16LengthByIdx(proto_id_->shorty_idx_,
- &lhs_shorty_len);
- StringPiece lhs_shorty(lhs_shorty_data, lhs_shorty_len);
- {
- uint32_t rhs_shorty_len;
- const char* rhs_shorty_data =
- rhs.dex_file_->StringDataAndUtf16LengthByIdx(rhs.proto_id_->shorty_idx_,
- &rhs_shorty_len);
- StringPiece rhs_shorty(rhs_shorty_data, rhs_shorty_len);
- if (lhs_shorty != rhs_shorty) {
- return false; // Shorty mismatch.
- }
- }
- if (lhs_shorty[0] == 'L') {
- const DexFile::TypeId& return_type_id = dex_file_->GetTypeId(proto_id_->return_type_idx_);
- const DexFile::TypeId& rhs_return_type_id =
- rhs.dex_file_->GetTypeId(rhs.proto_id_->return_type_idx_);
- if (!DexFileStringEquals(dex_file_, return_type_id.descriptor_idx_,
- rhs.dex_file_, rhs_return_type_id.descriptor_idx_)) {
- return false; // Return type mismatch.
- }
- }
- if (lhs_shorty.find('L', 1) != StringPiece::npos) {
- const DexFile::TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
- const DexFile::TypeList* rhs_params = rhs.dex_file_->GetProtoParameters(*rhs.proto_id_);
- // We found a reference parameter in the matching shorty, so both lists must be non-empty.
- DCHECK(params != nullptr);
- DCHECK(rhs_params != nullptr);
- uint32_t params_size = params->Size();
- DCHECK_EQ(params_size, rhs_params->Size()); // Parameter list size must match.
- for (uint32_t i = 0; i < params_size; ++i) {
- const DexFile::TypeId& param_id = dex_file_->GetTypeId(params->GetTypeItem(i).type_idx_);
- const DexFile::TypeId& rhs_param_id =
- rhs.dex_file_->GetTypeId(rhs_params->GetTypeItem(i).type_idx_);
- if (!DexFileStringEquals(dex_file_, param_id.descriptor_idx_,
- rhs.dex_file_, rhs_param_id.descriptor_idx_)) {
- return false; // Parameter type mismatch.
- }
- }
- }
- return true;
-}
-
template<typename NewLocalCallback, typename IndexToStringData, typename TypeIndexToStringData>
bool DexFile::DecodeDebugLocalInfo(const uint8_t* stream,
const std::string& location,
diff --git a/libdexfile/dex/dex_file.cc b/libdexfile/dex/dex_file.cc
index 7ccb9c0..5c100e6 100644
--- a/libdexfile/dex/dex_file.cc
+++ b/libdexfile/dex/dex_file.cc
@@ -41,6 +41,19 @@
using android::base::StringPrintf;
+using dex::CallSiteIdItem;
+using dex::ClassDef;
+using dex::FieldId;
+using dex::MapList;
+using dex::MapItem;
+using dex::MethodHandleItem;
+using dex::MethodId;
+using dex::ProtoId;
+using dex::StringId;
+using dex::TryItem;
+using dex::TypeId;
+using dex::TypeList;
+
static_assert(sizeof(dex::StringIndex) == sizeof(uint32_t), "StringIndex size is wrong");
static_assert(std::is_trivially_copyable<dex::StringIndex>::value, "StringIndex not trivial");
static_assert(sizeof(dex::TypeIndex) == sizeof(uint16_t), "TypeIndex size is wrong");
@@ -195,7 +208,7 @@
return atoi(version);
}
-const DexFile::ClassDef* DexFile::FindClassDef(dex::TypeIndex type_idx) const {
+const ClassDef* DexFile::FindClassDef(dex::TypeIndex type_idx) const {
size_t num_class_defs = NumClassDefs();
// Fast path for rare no class defs case.
if (num_class_defs == 0) {
@@ -210,8 +223,7 @@
return nullptr;
}
-uint32_t DexFile::FindCodeItemOffset(const DexFile::ClassDef& class_def,
- uint32_t method_idx) const {
+uint32_t DexFile::FindCodeItemOffset(const ClassDef& class_def, uint32_t method_idx) const {
ClassAccessor accessor(*this, class_def);
CHECK(accessor.HasClassData());
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
@@ -223,9 +235,9 @@
UNREACHABLE();
}
-const DexFile::FieldId* DexFile::FindFieldId(const DexFile::TypeId& declaring_klass,
- const DexFile::StringId& name,
- const DexFile::TypeId& type) const {
+const FieldId* DexFile::FindFieldId(const TypeId& declaring_klass,
+ const StringId& name,
+ const TypeId& type) const {
// Binary search MethodIds knowing that they are sorted by class_idx, name_idx then proto_idx
const dex::TypeIndex class_idx = GetIndexForTypeId(declaring_klass);
const dex::StringIndex name_idx = GetIndexForStringId(name);
@@ -234,7 +246,7 @@
int32_t hi = NumFieldIds() - 1;
while (hi >= lo) {
int32_t mid = (hi + lo) / 2;
- const DexFile::FieldId& field = GetFieldId(mid);
+ const FieldId& field = GetFieldId(mid);
if (class_idx > field.class_idx_) {
lo = mid + 1;
} else if (class_idx < field.class_idx_) {
@@ -258,9 +270,9 @@
return nullptr;
}
-const DexFile::MethodId* DexFile::FindMethodId(const DexFile::TypeId& declaring_klass,
- const DexFile::StringId& name,
- const DexFile::ProtoId& signature) const {
+const MethodId* DexFile::FindMethodId(const TypeId& declaring_klass,
+ const StringId& name,
+ const ProtoId& signature) const {
// Binary search MethodIds knowing that they are sorted by class_idx, name_idx then proto_idx
const dex::TypeIndex class_idx = GetIndexForTypeId(declaring_klass);
const dex::StringIndex name_idx = GetIndexForStringId(name);
@@ -269,7 +281,7 @@
int32_t hi = NumMethodIds() - 1;
while (hi >= lo) {
int32_t mid = (hi + lo) / 2;
- const DexFile::MethodId& method = GetMethodId(mid);
+ const MethodId& method = GetMethodId(mid);
if (class_idx > method.class_idx_) {
lo = mid + 1;
} else if (class_idx < method.class_idx_) {
@@ -293,12 +305,12 @@
return nullptr;
}
-const DexFile::StringId* DexFile::FindStringId(const char* string) const {
+const StringId* DexFile::FindStringId(const char* string) const {
int32_t lo = 0;
int32_t hi = NumStringIds() - 1;
while (hi >= lo) {
int32_t mid = (hi + lo) / 2;
- const DexFile::StringId& str_id = GetStringId(dex::StringIndex(mid));
+ const StringId& str_id = GetStringId(dex::StringIndex(mid));
const char* str = GetStringData(str_id);
int compare = CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(string, str);
if (compare > 0) {
@@ -312,13 +324,13 @@
return nullptr;
}
-const DexFile::TypeId* DexFile::FindTypeId(const char* string) const {
+const TypeId* DexFile::FindTypeId(const char* string) const {
int32_t lo = 0;
int32_t hi = NumTypeIds() - 1;
while (hi >= lo) {
int32_t mid = (hi + lo) / 2;
const TypeId& type_id = GetTypeId(dex::TypeIndex(mid));
- const DexFile::StringId& str_id = GetStringId(type_id.descriptor_idx_);
+ const StringId& str_id = GetStringId(type_id.descriptor_idx_);
const char* str = GetStringData(str_id);
int compare = CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(string, str);
if (compare > 0) {
@@ -332,7 +344,7 @@
return nullptr;
}
-const DexFile::TypeId* DexFile::FindTypeId(dex::StringIndex string_idx) const {
+const TypeId* DexFile::FindTypeId(dex::StringIndex string_idx) const {
int32_t lo = 0;
int32_t hi = NumTypeIds() - 1;
while (hi >= lo) {
@@ -349,15 +361,15 @@
return nullptr;
}
-const DexFile::ProtoId* DexFile::FindProtoId(dex::TypeIndex return_type_idx,
- const dex::TypeIndex* signature_type_idxs,
- uint32_t signature_length) const {
+const ProtoId* DexFile::FindProtoId(dex::TypeIndex return_type_idx,
+ const dex::TypeIndex* signature_type_idxs,
+ uint32_t signature_length) const {
int32_t lo = 0;
int32_t hi = NumProtoIds() - 1;
while (hi >= lo) {
int32_t mid = (hi + lo) / 2;
const dex::ProtoIndex proto_idx = static_cast<dex::ProtoIndex>(mid);
- const DexFile::ProtoId& proto = GetProtoId(proto_idx);
+ const ProtoId& proto = GetProtoId(proto_idx);
int compare = return_type_idx.index_ - proto.return_type_idx_.index_;
if (compare == 0) {
DexFileParameterIterator it(*this, proto);
@@ -422,7 +434,7 @@
}
// TODO: avoid creating a std::string just to get a 0-terminated char array
std::string descriptor(signature.data() + start_offset, offset - start_offset);
- const DexFile::TypeId* type_id = FindTypeId(descriptor.c_str());
+ const TypeId* type_id = FindTypeId(descriptor.c_str());
if (type_id == nullptr) {
return false;
}
@@ -457,7 +469,7 @@
while (min < max) {
const uint32_t mid = (min + max) / 2;
- const art::DexFile::TryItem& ti = try_items[mid];
+ const TryItem& ti = try_items[mid];
const uint32_t start = ti.start_addr_;
const uint32_t end = start + ti.insn_count_;
@@ -523,9 +535,9 @@
if (method_idx >= NumMethodIds()) {
return StringPrintf("<<invalid-method-idx-%d>>", method_idx);
}
- const DexFile::MethodId& method_id = GetMethodId(method_idx);
+ const MethodId& method_id = GetMethodId(method_idx);
std::string result;
- const DexFile::ProtoId* proto_id = with_signature ? &GetProtoId(method_id.proto_idx_) : nullptr;
+ const ProtoId* proto_id = with_signature ? &GetProtoId(method_id.proto_idx_) : nullptr;
if (with_signature) {
AppendPrettyDescriptor(StringByTypeIdx(proto_id->return_type_idx_), &result);
result += ' ';
@@ -535,7 +547,7 @@
result += GetMethodName(method_id);
if (with_signature) {
result += '(';
- const DexFile::TypeList* params = GetProtoParameters(*proto_id);
+ const TypeList* params = GetProtoParameters(*proto_id);
if (params != nullptr) {
const char* separator = "";
for (uint32_t i = 0u, size = params->Size(); i != size; ++i) {
@@ -553,7 +565,7 @@
if (field_idx >= NumFieldIds()) {
return StringPrintf("<<invalid-field-idx-%d>>", field_idx);
}
- const DexFile::FieldId& field_id = GetFieldId(field_idx);
+ const FieldId& field_id = GetFieldId(field_idx);
std::string result;
if (with_type) {
result += GetFieldTypeDescriptor(field_id);
@@ -569,12 +581,12 @@
if (type_idx.index_ >= NumTypeIds()) {
return StringPrintf("<<invalid-type-idx-%d>>", type_idx.index_);
}
- const DexFile::TypeId& type_id = GetTypeId(type_idx);
+ const TypeId& type_id = GetTypeId(type_idx);
return PrettyDescriptor(GetTypeDescriptor(type_id));
}
dex::ProtoIndex DexFile::GetProtoIndexForCallSite(uint32_t call_site_idx) const {
- const DexFile::CallSiteIdItem& csi = GetCallSiteId(call_site_idx);
+ const CallSiteIdItem& csi = GetCallSiteId(call_site_idx);
CallSiteArrayValueIterator it(*this, csi);
it.Next();
it.Next();
@@ -592,66 +604,6 @@
return os;
}
-std::string Signature::ToString() const {
- if (dex_file_ == nullptr) {
- CHECK(proto_id_ == nullptr);
- return "<no signature>";
- }
- const DexFile::TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
- std::string result;
- if (params == nullptr) {
- result += "()";
- } else {
- result += "(";
- for (uint32_t i = 0; i < params->Size(); ++i) {
- result += dex_file_->StringByTypeIdx(params->GetTypeItem(i).type_idx_);
- }
- result += ")";
- }
- result += dex_file_->StringByTypeIdx(proto_id_->return_type_idx_);
- return result;
-}
-
-uint32_t Signature::GetNumberOfParameters() const {
- const DexFile::TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
- return (params != nullptr) ? params->Size() : 0;
-}
-
-bool Signature::IsVoid() const {
- const char* return_type = dex_file_->GetReturnTypeDescriptor(*proto_id_);
- return strcmp(return_type, "V") == 0;
-}
-
-bool Signature::operator==(const StringPiece& rhs) const {
- if (dex_file_ == nullptr) {
- return false;
- }
- StringPiece tail(rhs);
- if (!tail.starts_with("(")) {
- return false; // Invalid signature
- }
- tail.remove_prefix(1); // "(";
- const DexFile::TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
- if (params != nullptr) {
- for (uint32_t i = 0; i < params->Size(); ++i) {
- StringPiece param(dex_file_->StringByTypeIdx(params->GetTypeItem(i).type_idx_));
- if (!tail.starts_with(param)) {
- return false;
- }
- tail.remove_prefix(param.length());
- }
- }
- if (!tail.starts_with(")")) {
- return false;
- }
- tail.remove_prefix(1); // ")";
- return tail == dex_file_->StringByTypeIdx(proto_id_->return_type_idx_);
-}
-
-std::ostream& operator<<(std::ostream& os, const Signature& sig) {
- return os << sig.ToString();
-}
-
EncodedArrayValueIterator::EncodedArrayValueIterator(const DexFile& dex_file,
const uint8_t* array_data)
: dex_file_(dex_file),
diff --git a/libdexfile/dex/dex_file.h b/libdexfile/dex/dex_file.h
index 83f47fe..a940a66 100644
--- a/libdexfile/dex/dex_file.h
+++ b/libdexfile/dex/dex_file.h
@@ -24,13 +24,14 @@
#include <android-base/logging.h>
#include "base/globals.h"
-#include "base/iteration_range.h"
#include "base/macros.h"
#include "base/value_object.h"
#include "class_iterator.h"
+#include "dex_file_structs.h"
#include "dex_file_types.h"
#include "jni.h"
#include "modifiers.h"
+#include "signature.h"
namespace art {
@@ -38,9 +39,9 @@
class CompactDexFile;
class DexInstructionIterator;
enum InvokeType : uint32_t;
+template <typename Iter> class IterationRange;
class MemMap;
class OatDexFile;
-class Signature;
class StandardDexFile;
class StringPiece;
class ZipArchive;
@@ -136,150 +137,6 @@
kDexTypeHiddenapiClassData = 0xF000,
};
- struct MapItem {
- uint16_t type_;
- uint16_t unused_;
- uint32_t size_;
- uint32_t offset_;
- };
-
- struct MapList {
- uint32_t size_;
- MapItem list_[1];
-
- size_t Size() const { return sizeof(uint32_t) + (size_ * sizeof(MapItem)); }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MapList);
- };
-
- // Raw string_id_item.
- struct StringId {
- uint32_t string_data_off_; // offset in bytes from the base address
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringId);
- };
-
- // Raw type_id_item.
- struct TypeId {
- dex::StringIndex descriptor_idx_; // index into string_ids
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TypeId);
- };
-
- // Raw field_id_item.
- struct FieldId {
- dex::TypeIndex class_idx_; // index into type_ids_ array for defining class
- dex::TypeIndex type_idx_; // index into type_ids_ array for field type
- dex::StringIndex name_idx_; // index into string_ids_ array for field name
-
- private:
- DISALLOW_COPY_AND_ASSIGN(FieldId);
- };
-
- // Raw proto_id_item.
- struct ProtoId {
- dex::StringIndex shorty_idx_; // index into string_ids array for shorty descriptor
- dex::TypeIndex return_type_idx_; // index into type_ids array for return type
- uint16_t pad_; // padding = 0
- uint32_t parameters_off_; // file offset to type_list for parameter types
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ProtoId);
- };
-
- // Raw method_id_item.
- struct MethodId {
- dex::TypeIndex class_idx_; // index into type_ids_ array for defining class
- dex::ProtoIndex proto_idx_; // index into proto_ids_ array for method prototype
- dex::StringIndex name_idx_; // index into string_ids_ array for method name
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MethodId);
- };
-
- // Base code_item, compact dex and standard dex have different code item layouts.
- struct CodeItem {
- protected:
- CodeItem() = default;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeItem);
- };
-
- // Raw class_def_item.
- struct ClassDef {
- dex::TypeIndex class_idx_; // index into type_ids_ array for this class
- uint16_t pad1_; // padding = 0
- uint32_t access_flags_;
- dex::TypeIndex superclass_idx_; // index into type_ids_ array for superclass
- uint16_t pad2_; // padding = 0
- uint32_t interfaces_off_; // file offset to TypeList
- dex::StringIndex source_file_idx_; // index into string_ids_ for source file name
- uint32_t annotations_off_; // file offset to annotations_directory_item
- uint32_t class_data_off_; // file offset to class_data_item
- uint32_t static_values_off_; // file offset to EncodedArray
-
- // Returns the valid access flags, that is, Java modifier bits relevant to the ClassDef type
- // (class or interface). These are all in the lower 16b and do not contain runtime flags.
- uint32_t GetJavaAccessFlags() const {
- // Make sure that none of our runtime-only flags are set.
- static_assert((kAccValidClassFlags & kAccJavaFlagsMask) == kAccValidClassFlags,
- "Valid class flags not a subset of Java flags");
- static_assert((kAccValidInterfaceFlags & kAccJavaFlagsMask) == kAccValidInterfaceFlags,
- "Valid interface flags not a subset of Java flags");
-
- if ((access_flags_ & kAccInterface) != 0) {
- // Interface.
- return access_flags_ & kAccValidInterfaceFlags;
- } else {
- // Class.
- return access_flags_ & kAccValidClassFlags;
- }
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ClassDef);
- };
-
- // Raw type_item.
- struct TypeItem {
- dex::TypeIndex type_idx_; // index into type_ids section
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TypeItem);
- };
-
- // Raw type_list.
- class TypeList {
- public:
- uint32_t Size() const {
- return size_;
- }
-
- const TypeItem& GetTypeItem(uint32_t idx) const {
- DCHECK_LT(idx, this->size_);
- return this->list_[idx];
- }
-
- // Size in bytes of the part of the list that is common.
- static constexpr size_t GetHeaderSize() {
- return 4U;
- }
-
- // Size in bytes of the whole type list including all the stored elements.
- static constexpr size_t GetListSize(size_t count) {
- return GetHeaderSize() + sizeof(TypeItem) * count;
- }
-
- private:
- uint32_t size_; // size of the list, in entries
- TypeItem list_[1]; // elements of the list
- DISALLOW_COPY_AND_ASSIGN(TypeList);
- };
-
// MethodHandle Types
enum class MethodHandleType : uint16_t { // private
kStaticPut = 0x0000, // a setter for a given static field.
@@ -296,37 +153,6 @@
kLast = kInvokeInterface
};
- // raw method_handle_item
- struct MethodHandleItem {
- uint16_t method_handle_type_;
- uint16_t reserved1_; // Reserved for future use.
- uint16_t field_or_method_idx_; // Field index for accessors, method index otherwise.
- uint16_t reserved2_; // Reserved for future use.
- private:
- DISALLOW_COPY_AND_ASSIGN(MethodHandleItem);
- };
-
- // raw call_site_id_item
- struct CallSiteIdItem {
- uint32_t data_off_; // Offset into data section pointing to encoded array items.
- private:
- DISALLOW_COPY_AND_ASSIGN(CallSiteIdItem);
- };
-
- // Raw try_item.
- struct TryItem {
- static constexpr size_t kAlignment = sizeof(uint32_t);
-
- uint32_t start_addr_;
- uint16_t insn_count_;
- uint16_t handler_off_;
-
- private:
- TryItem() = default;
- friend class DexWriter;
- DISALLOW_COPY_AND_ASSIGN(TryItem);
- };
-
// Annotation constants.
enum {
kDexVisibilityBuild = 0x00, /* annotation visibility */
@@ -356,92 +182,6 @@
kDexAnnotationValueArgShift = 5,
};
- struct AnnotationsDirectoryItem {
- uint32_t class_annotations_off_;
- uint32_t fields_size_;
- uint32_t methods_size_;
- uint32_t parameters_size_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AnnotationsDirectoryItem);
- };
-
- struct FieldAnnotationsItem {
- uint32_t field_idx_;
- uint32_t annotations_off_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(FieldAnnotationsItem);
- };
-
- struct MethodAnnotationsItem {
- uint32_t method_idx_;
- uint32_t annotations_off_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MethodAnnotationsItem);
- };
-
- struct ParameterAnnotationsItem {
- uint32_t method_idx_;
- uint32_t annotations_off_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ParameterAnnotationsItem);
- };
-
- struct AnnotationSetRefItem {
- uint32_t annotations_off_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AnnotationSetRefItem);
- };
-
- struct AnnotationSetRefList {
- uint32_t size_;
- AnnotationSetRefItem list_[1];
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AnnotationSetRefList);
- };
-
- struct AnnotationSetItem {
- uint32_t size_;
- uint32_t entries_[1];
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AnnotationSetItem);
- };
-
- struct AnnotationItem {
- uint8_t visibility_;
- uint8_t annotation_[1];
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AnnotationItem);
- };
-
- struct HiddenapiClassData {
- uint32_t size_; // total size of the item
- uint32_t flags_offset_[1]; // array of offsets from the beginning of this item,
- // indexed by class def index
-
- // Returns a pointer to the beginning of a uleb128-stream of hiddenapi
- // flags for a class def of given index. Values are in the same order
- // as fields/methods in the class data. Returns null if the class does
- // not have class data.
- const uint8_t* GetFlagsPointer(uint32_t class_def_idx) const {
- if (flags_offset_[class_def_idx] == 0) {
- return nullptr;
- } else {
- return reinterpret_cast<const uint8_t*>(this) + flags_offset_[class_def_idx];
- }
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(HiddenapiClassData);
- };
-
enum AnnotationResultStyle { // private
kAllObjects,
kPrimitivesOrObjects,
@@ -496,25 +236,26 @@
}
// Returns the StringId at the specified index.
- const StringId& GetStringId(dex::StringIndex idx) const {
+ const dex::StringId& GetStringId(dex::StringIndex idx) const {
DCHECK_LT(idx.index_, NumStringIds()) << GetLocation();
return string_ids_[idx.index_];
}
- dex::StringIndex GetIndexForStringId(const StringId& string_id) const {
+ dex::StringIndex GetIndexForStringId(const dex::StringId& string_id) const {
CHECK_GE(&string_id, string_ids_) << GetLocation();
CHECK_LT(&string_id, string_ids_ + header_->string_ids_size_) << GetLocation();
return dex::StringIndex(&string_id - string_ids_);
}
- int32_t GetStringLength(const StringId& string_id) const;
+ int32_t GetStringLength(const dex::StringId& string_id) const;
// Returns a pointer to the UTF-8 string data referred to by the given string_id as well as the
// length of the string when decoded as a UTF-16 string. Note the UTF-16 length is not the same
// as the string length of the string data.
- const char* GetStringDataAndUtf16Length(const StringId& string_id, uint32_t* utf16_length) const;
+ const char* GetStringDataAndUtf16Length(const dex::StringId& string_id,
+ uint32_t* utf16_length) const;
- const char* GetStringData(const StringId& string_id) const;
+ const char* GetStringData(const dex::StringId& string_id) const;
// Index version of GetStringDataAndUtf16Length.
const char* StringDataAndUtf16LengthByIdx(dex::StringIndex idx, uint32_t* utf16_length) const;
@@ -522,9 +263,9 @@
const char* StringDataByIdx(dex::StringIndex idx) const;
// Looks up a string id for a given modified utf8 string.
- const StringId* FindStringId(const char* string) const;
+ const dex::StringId* FindStringId(const char* string) const;
- const TypeId* FindTypeId(const char* string) const;
+ const dex::TypeId* FindTypeId(const char* string) const;
// Returns the number of type identifiers in the .dex file.
uint32_t NumTypeIds() const {
@@ -537,12 +278,12 @@
}
// Returns the TypeId at the specified index.
- const TypeId& GetTypeId(dex::TypeIndex idx) const {
+ const dex::TypeId& GetTypeId(dex::TypeIndex idx) const {
DCHECK_LT(idx.index_, NumTypeIds()) << GetLocation();
return type_ids_[idx.index_];
}
- dex::TypeIndex GetIndexForTypeId(const TypeId& type_id) const {
+ dex::TypeIndex GetIndexForTypeId(const dex::TypeId& type_id) const {
CHECK_GE(&type_id, type_ids_) << GetLocation();
CHECK_LT(&type_id, type_ids_ + header_->type_ids_size_) << GetLocation();
size_t result = &type_id - type_ids_;
@@ -556,10 +297,10 @@
const char* StringByTypeIdx(dex::TypeIndex idx) const;
// Returns the type descriptor string of a type id.
- const char* GetTypeDescriptor(const TypeId& type_id) const;
+ const char* GetTypeDescriptor(const dex::TypeId& type_id) const;
// Looks up a type for the given string index
- const TypeId* FindTypeId(dex::StringIndex string_idx) const;
+ const dex::TypeId* FindTypeId(dex::StringIndex string_idx) const;
// Returns the number of field identifiers in the .dex file.
size_t NumFieldIds() const {
@@ -568,38 +309,38 @@
}
// Returns the FieldId at the specified index.
- const FieldId& GetFieldId(uint32_t idx) const {
+ const dex::FieldId& GetFieldId(uint32_t idx) const {
DCHECK_LT(idx, NumFieldIds()) << GetLocation();
return field_ids_[idx];
}
- uint32_t GetIndexForFieldId(const FieldId& field_id) const {
+ uint32_t GetIndexForFieldId(const dex::FieldId& field_id) const {
CHECK_GE(&field_id, field_ids_) << GetLocation();
CHECK_LT(&field_id, field_ids_ + header_->field_ids_size_) << GetLocation();
return &field_id - field_ids_;
}
// Looks up a field by its declaring class, name and type
- const FieldId* FindFieldId(const DexFile::TypeId& declaring_klass,
- const DexFile::StringId& name,
- const DexFile::TypeId& type) const;
+ const dex::FieldId* FindFieldId(const dex::TypeId& declaring_klass,
+ const dex::StringId& name,
+ const dex::TypeId& type) const;
- uint32_t FindCodeItemOffset(const DexFile::ClassDef& class_def,
+ uint32_t FindCodeItemOffset(const dex::ClassDef& class_def,
uint32_t dex_method_idx) const;
- virtual uint32_t GetCodeItemSize(const DexFile::CodeItem& disk_code_item) const = 0;
+ virtual uint32_t GetCodeItemSize(const dex::CodeItem& disk_code_item) const = 0;
// Returns the declaring class descriptor string of a field id.
- const char* GetFieldDeclaringClassDescriptor(const FieldId& field_id) const {
- const DexFile::TypeId& type_id = GetTypeId(field_id.class_idx_);
+ const char* GetFieldDeclaringClassDescriptor(const dex::FieldId& field_id) const {
+ const dex::TypeId& type_id = GetTypeId(field_id.class_idx_);
return GetTypeDescriptor(type_id);
}
// Returns the class descriptor string of a field id.
- const char* GetFieldTypeDescriptor(const FieldId& field_id) const;
+ const char* GetFieldTypeDescriptor(const dex::FieldId& field_id) const;
// Returns the name of a field id.
- const char* GetFieldName(const FieldId& field_id) const;
+ const char* GetFieldName(const dex::FieldId& field_id) const;
// Returns the number of method identifiers in the .dex file.
size_t NumMethodIds() const {
@@ -608,47 +349,47 @@
}
// Returns the MethodId at the specified index.
- const MethodId& GetMethodId(uint32_t idx) const {
+ const dex::MethodId& GetMethodId(uint32_t idx) const {
DCHECK_LT(idx, NumMethodIds()) << GetLocation();
return method_ids_[idx];
}
- uint32_t GetIndexForMethodId(const MethodId& method_id) const {
+ uint32_t GetIndexForMethodId(const dex::MethodId& method_id) const {
CHECK_GE(&method_id, method_ids_) << GetLocation();
CHECK_LT(&method_id, method_ids_ + header_->method_ids_size_) << GetLocation();
return &method_id - method_ids_;
}
// Looks up a method by its declaring class, name and proto_id
- const MethodId* FindMethodId(const DexFile::TypeId& declaring_klass,
- const DexFile::StringId& name,
- const DexFile::ProtoId& signature) const;
+ const dex::MethodId* FindMethodId(const dex::TypeId& declaring_klass,
+ const dex::StringId& name,
+ const dex::ProtoId& signature) const;
// Returns the declaring class descriptor string of a method id.
- const char* GetMethodDeclaringClassDescriptor(const MethodId& method_id) const;
+ const char* GetMethodDeclaringClassDescriptor(const dex::MethodId& method_id) const;
// Returns the prototype of a method id.
- const ProtoId& GetMethodPrototype(const MethodId& method_id) const {
+ const dex::ProtoId& GetMethodPrototype(const dex::MethodId& method_id) const {
return GetProtoId(method_id.proto_idx_);
}
// Returns a representation of the signature of a method id.
- const Signature GetMethodSignature(const MethodId& method_id) const;
+ const Signature GetMethodSignature(const dex::MethodId& method_id) const;
// Returns a representation of the signature of a proto id.
- const Signature GetProtoSignature(const ProtoId& proto_id) const;
+ const Signature GetProtoSignature(const dex::ProtoId& proto_id) const;
// Returns the name of a method id.
- const char* GetMethodName(const MethodId& method_id) const;
- const char* GetMethodName(const MethodId& method_id, uint32_t* utf_length) const;
+ const char* GetMethodName(const dex::MethodId& method_id) const;
+ const char* GetMethodName(const dex::MethodId& method_id, uint32_t* utf_length) const;
const char* GetMethodName(uint32_t idx, uint32_t* utf_length) const;
// Returns the shorty of a method by its index.
const char* GetMethodShorty(uint32_t idx) const;
// Returns the shorty of a method id.
- const char* GetMethodShorty(const MethodId& method_id) const;
- const char* GetMethodShorty(const MethodId& method_id, uint32_t* length) const;
+ const char* GetMethodShorty(const dex::MethodId& method_id) const;
+ const char* GetMethodShorty(const dex::MethodId& method_id, uint32_t* length) const;
// Returns the number of class definitions in the .dex file.
uint32_t NumClassDefs() const {
@@ -657,32 +398,32 @@
}
// Returns the ClassDef at the specified index.
- const ClassDef& GetClassDef(uint16_t idx) const {
+ const dex::ClassDef& GetClassDef(uint16_t idx) const {
DCHECK_LT(idx, NumClassDefs()) << GetLocation();
return class_defs_[idx];
}
- uint16_t GetIndexForClassDef(const ClassDef& class_def) const {
+ uint16_t GetIndexForClassDef(const dex::ClassDef& class_def) const {
CHECK_GE(&class_def, class_defs_) << GetLocation();
CHECK_LT(&class_def, class_defs_ + header_->class_defs_size_) << GetLocation();
return &class_def - class_defs_;
}
// Returns the class descriptor string of a class definition.
- const char* GetClassDescriptor(const ClassDef& class_def) const;
+ const char* GetClassDescriptor(const dex::ClassDef& class_def) const;
// Looks up a class definition by its type index.
- const ClassDef* FindClassDef(dex::TypeIndex type_idx) const;
+ const dex::ClassDef* FindClassDef(dex::TypeIndex type_idx) const;
- const TypeList* GetInterfacesList(const ClassDef& class_def) const {
- return DataPointer<TypeList>(class_def.interfaces_off_);
+ const dex::TypeList* GetInterfacesList(const dex::ClassDef& class_def) const {
+ return DataPointer<dex::TypeList>(class_def.interfaces_off_);
}
uint32_t NumMethodHandles() const {
return num_method_handles_;
}
- const MethodHandleItem& GetMethodHandle(uint32_t idx) const {
+ const dex::MethodHandleItem& GetMethodHandle(uint32_t idx) const {
CHECK_LT(idx, NumMethodHandles());
return method_handles_[idx];
}
@@ -691,23 +432,23 @@
return num_call_site_ids_;
}
- const CallSiteIdItem& GetCallSiteId(uint32_t idx) const {
+ const dex::CallSiteIdItem& GetCallSiteId(uint32_t idx) const {
CHECK_LT(idx, NumCallSiteIds());
return call_site_ids_[idx];
}
// Returns a pointer to the raw memory mapped class_data_item
- const uint8_t* GetClassData(const ClassDef& class_def) const {
+ const uint8_t* GetClassData(const dex::ClassDef& class_def) const {
return DataPointer<uint8_t>(class_def.class_data_off_);
}
// Return the code item for a provided offset.
- const CodeItem* GetCodeItem(const uint32_t code_off) const {
+ const dex::CodeItem* GetCodeItem(const uint32_t code_off) const {
// May be null for native or abstract methods.
- return DataPointer<CodeItem>(code_off);
+ return DataPointer<dex::CodeItem>(code_off);
}
- const char* GetReturnTypeDescriptor(const ProtoId& proto_id) const;
+ const char* GetReturnTypeDescriptor(const dex::ProtoId& proto_id) const;
// Returns the number of prototype identifiers in the .dex file.
size_t NumProtoIds() const {
@@ -716,23 +457,23 @@
}
// Returns the ProtoId at the specified index.
- const ProtoId& GetProtoId(dex::ProtoIndex idx) const {
+ const dex::ProtoId& GetProtoId(dex::ProtoIndex idx) const {
DCHECK_LT(idx.index_, NumProtoIds()) << GetLocation();
return proto_ids_[idx.index_];
}
- dex::ProtoIndex GetIndexForProtoId(const ProtoId& proto_id) const {
+ dex::ProtoIndex GetIndexForProtoId(const dex::ProtoId& proto_id) const {
CHECK_GE(&proto_id, proto_ids_) << GetLocation();
CHECK_LT(&proto_id, proto_ids_ + header_->proto_ids_size_) << GetLocation();
return dex::ProtoIndex(&proto_id - proto_ids_);
}
// Looks up a proto id for a given return type and signature type list
- const ProtoId* FindProtoId(dex::TypeIndex return_type_idx,
- const dex::TypeIndex* signature_type_idxs,
+ const dex::ProtoId* FindProtoId(dex::TypeIndex return_type_idx,
+ const dex::TypeIndex* signature_type_idxs,
uint32_t signature_length) const;
- const ProtoId* FindProtoId(dex::TypeIndex return_type_idx,
- const std::vector<dex::TypeIndex>& signature_type_idxs) const {
+ const dex::ProtoId* FindProtoId(dex::TypeIndex return_type_idx,
+ const std::vector<dex::TypeIndex>& signature_type_idxs) const {
return FindProtoId(return_type_idx, &signature_type_idxs[0], signature_type_idxs.size());
}
@@ -748,21 +489,22 @@
// Returns the short form method descriptor for the given prototype.
const char* GetShorty(dex::ProtoIndex proto_idx) const;
- const TypeList* GetProtoParameters(const ProtoId& proto_id) const {
- return DataPointer<TypeList>(proto_id.parameters_off_);
+ const dex::TypeList* GetProtoParameters(const dex::ProtoId& proto_id) const {
+ return DataPointer<dex::TypeList>(proto_id.parameters_off_);
}
- const uint8_t* GetEncodedStaticFieldValuesArray(const ClassDef& class_def) const {
+ const uint8_t* GetEncodedStaticFieldValuesArray(const dex::ClassDef& class_def) const {
return DataPointer<uint8_t>(class_def.static_values_off_);
}
- const uint8_t* GetCallSiteEncodedValuesArray(const CallSiteIdItem& call_site_id) const {
+ const uint8_t* GetCallSiteEncodedValuesArray(const dex::CallSiteIdItem& call_site_id) const {
return DataBegin() + call_site_id.data_off_;
}
dex::ProtoIndex GetProtoIndexForCallSite(uint32_t call_site_idx) const;
- static const TryItem* GetTryItems(const DexInstructionIterator& code_item_end, uint32_t offset);
+ static const dex::TryItem* GetTryItems(const DexInstructionIterator& code_item_end,
+ uint32_t offset);
// Get the base of the encoded data for the given DexCode.
static const uint8_t* GetCatchHandlerData(const DexInstructionIterator& code_item_end,
@@ -770,7 +512,7 @@
uint32_t offset);
// Find which try region is associated with the given address (ie dex pc). Returns -1 if none.
- static int32_t FindTryItem(const TryItem* try_items, uint32_t tries_size, uint32_t address);
+ static int32_t FindTryItem(const dex::TryItem* try_items, uint32_t tries_size, uint32_t address);
// Get the pointer to the start of the debugging data
const uint8_t* GetDebugInfoStream(uint32_t debug_info_off) const {
@@ -807,76 +549,83 @@
// Callback for "new locals table entry".
typedef void (*DexDebugNewLocalCb)(void* context, const LocalInfo& entry);
- const AnnotationsDirectoryItem* GetAnnotationsDirectory(const ClassDef& class_def) const {
- return DataPointer<AnnotationsDirectoryItem>(class_def.annotations_off_);
+ const dex::AnnotationsDirectoryItem* GetAnnotationsDirectory(const dex::ClassDef& class_def)
+ const {
+ return DataPointer<dex::AnnotationsDirectoryItem>(class_def.annotations_off_);
}
- const AnnotationSetItem* GetClassAnnotationSet(const AnnotationsDirectoryItem* anno_dir) const {
- return DataPointer<AnnotationSetItem>(anno_dir->class_annotations_off_);
+ const dex::AnnotationSetItem* GetClassAnnotationSet(const dex::AnnotationsDirectoryItem* anno_dir)
+ const {
+ return DataPointer<dex::AnnotationSetItem>(anno_dir->class_annotations_off_);
}
- const FieldAnnotationsItem* GetFieldAnnotations(const AnnotationsDirectoryItem* anno_dir) const {
+ const dex::FieldAnnotationsItem* GetFieldAnnotations(
+ const dex::AnnotationsDirectoryItem* anno_dir) const {
return (anno_dir->fields_size_ == 0)
? nullptr
- : reinterpret_cast<const FieldAnnotationsItem*>(&anno_dir[1]);
+ : reinterpret_cast<const dex::FieldAnnotationsItem*>(&anno_dir[1]);
}
- const MethodAnnotationsItem* GetMethodAnnotations(const AnnotationsDirectoryItem* anno_dir)
- const {
+ const dex::MethodAnnotationsItem* GetMethodAnnotations(
+ const dex::AnnotationsDirectoryItem* anno_dir) const {
if (anno_dir->methods_size_ == 0) {
return nullptr;
}
// Skip past the header and field annotations.
const uint8_t* addr = reinterpret_cast<const uint8_t*>(&anno_dir[1]);
- addr += anno_dir->fields_size_ * sizeof(FieldAnnotationsItem);
- return reinterpret_cast<const MethodAnnotationsItem*>(addr);
+ addr += anno_dir->fields_size_ * sizeof(dex::FieldAnnotationsItem);
+ return reinterpret_cast<const dex::MethodAnnotationsItem*>(addr);
}
- const ParameterAnnotationsItem* GetParameterAnnotations(const AnnotationsDirectoryItem* anno_dir)
- const {
+ const dex::ParameterAnnotationsItem* GetParameterAnnotations(
+ const dex::AnnotationsDirectoryItem* anno_dir) const {
if (anno_dir->parameters_size_ == 0) {
return nullptr;
}
// Skip past the header, field annotations, and method annotations.
const uint8_t* addr = reinterpret_cast<const uint8_t*>(&anno_dir[1]);
- addr += anno_dir->fields_size_ * sizeof(FieldAnnotationsItem);
- addr += anno_dir->methods_size_ * sizeof(MethodAnnotationsItem);
- return reinterpret_cast<const ParameterAnnotationsItem*>(addr);
+ addr += anno_dir->fields_size_ * sizeof(dex::FieldAnnotationsItem);
+ addr += anno_dir->methods_size_ * sizeof(dex::MethodAnnotationsItem);
+ return reinterpret_cast<const dex::ParameterAnnotationsItem*>(addr);
}
- const AnnotationSetItem* GetFieldAnnotationSetItem(const FieldAnnotationsItem& anno_item) const {
- return DataPointer<AnnotationSetItem>(anno_item.annotations_off_);
+ const dex::AnnotationSetItem* GetFieldAnnotationSetItem(
+ const dex::FieldAnnotationsItem& anno_item) const {
+ return DataPointer<dex::AnnotationSetItem>(anno_item.annotations_off_);
}
- const AnnotationSetItem* GetMethodAnnotationSetItem(const MethodAnnotationsItem& anno_item)
+ const dex::AnnotationSetItem* GetMethodAnnotationSetItem(
+ const dex::MethodAnnotationsItem& anno_item) const {
+ return DataPointer<dex::AnnotationSetItem>(anno_item.annotations_off_);
+ }
+
+ const dex::AnnotationSetRefList* GetParameterAnnotationSetRefList(
+ const dex::ParameterAnnotationsItem* anno_item) const {
+ return DataPointer<dex::AnnotationSetRefList>(anno_item->annotations_off_);
+ }
+
+ ALWAYS_INLINE const dex::AnnotationItem* GetAnnotationItemAtOffset(uint32_t offset) const {
+ return DataPointer<dex::AnnotationItem>(offset);
+ }
+
+ ALWAYS_INLINE const dex::HiddenapiClassData* GetHiddenapiClassDataAtOffset(uint32_t offset)
const {
- return DataPointer<AnnotationSetItem>(anno_item.annotations_off_);
+ return DataPointer<dex::HiddenapiClassData>(offset);
}
- const AnnotationSetRefList* GetParameterAnnotationSetRefList(
- const ParameterAnnotationsItem* anno_item) const {
- return DataPointer<AnnotationSetRefList>(anno_item->annotations_off_);
- }
-
- ALWAYS_INLINE const AnnotationItem* GetAnnotationItemAtOffset(uint32_t offset) const {
- return DataPointer<AnnotationItem>(offset);
- }
-
- ALWAYS_INLINE const HiddenapiClassData* GetHiddenapiClassDataAtOffset(uint32_t offset) const {
- return DataPointer<HiddenapiClassData>(offset);
- }
-
- ALWAYS_INLINE const HiddenapiClassData* GetHiddenapiClassData() const {
+ ALWAYS_INLINE const dex::HiddenapiClassData* GetHiddenapiClassData() const {
return hiddenapi_class_data_;
}
- const AnnotationItem* GetAnnotationItem(const AnnotationSetItem* set_item, uint32_t index) const {
+ const dex::AnnotationItem* GetAnnotationItem(const dex::AnnotationSetItem* set_item,
+ uint32_t index) const {
DCHECK_LE(index, set_item->size_);
return GetAnnotationItemAtOffset(set_item->entries_[index]);
}
- const AnnotationSetItem* GetSetRefItemItem(const AnnotationSetRefItem* anno_item) const {
- return DataPointer<AnnotationSetItem>(anno_item->annotations_off_);
+ const dex::AnnotationSetItem* GetSetRefItemItem(const dex::AnnotationSetRefItem* anno_item)
+ const {
+ return DataPointer<dex::AnnotationSetItem>(anno_item->annotations_off_);
}
// Debug info opcodes and constants
@@ -925,7 +674,7 @@
const IndexToStringData& index_to_string_data,
const DexDebugNewPosition& position_functor);
- const char* GetSourceFile(const ClassDef& class_def) const {
+ const char* GetSourceFile(const dex::ClassDef& class_def) const {
if (!class_def.source_file_idx_.IsValid()) {
return nullptr;
} else {
@@ -973,8 +722,8 @@
}
// Read MapItems and validate/set remaining offsets.
- const DexFile::MapList* GetMapList() const {
- return reinterpret_cast<const DexFile::MapList*>(DataBegin() + header_->map_off_);
+ const dex::MapList* GetMapList() const {
+ return reinterpret_cast<const dex::MapList*>(DataBegin() + header_->map_off_);
}
// Utility methods for reading integral values from a buffer.
@@ -1031,6 +780,9 @@
static uint32_t DecodeDebugInfoParameterNames(const uint8_t** debug_info,
const Visitor& visitor);
+ static inline bool StringEquals(const DexFile* df1, dex::StringIndex sidx1,
+ const DexFile* df2, dex::StringIndex sidx2);
+
protected:
// First Dex format version supporting default methods.
static const uint32_t kDefaultMethodsVersion = 37;
@@ -1078,38 +830,38 @@
const Header* const header_;
// Points to the base of the string identifier list.
- const StringId* const string_ids_;
+ const dex::StringId* const string_ids_;
// Points to the base of the type identifier list.
- const TypeId* const type_ids_;
+ const dex::TypeId* const type_ids_;
// Points to the base of the field identifier list.
- const FieldId* const field_ids_;
+ const dex::FieldId* const field_ids_;
// Points to the base of the method identifier list.
- const MethodId* const method_ids_;
+ const dex::MethodId* const method_ids_;
// Points to the base of the prototype identifier list.
- const ProtoId* const proto_ids_;
+ const dex::ProtoId* const proto_ids_;
// Points to the base of the class definition list.
- const ClassDef* const class_defs_;
+ const dex::ClassDef* const class_defs_;
// Points to the base of the method handles list.
- const MethodHandleItem* method_handles_;
+ const dex::MethodHandleItem* method_handles_;
// Number of elements in the method handles list.
size_t num_method_handles_;
// Points to the base of the call sites id list.
- const CallSiteIdItem* call_site_ids_;
+ const dex::CallSiteIdItem* call_site_ids_;
// Number of elements in the call sites list.
size_t num_call_site_ids_;
// Points to the base of the hiddenapi class data item_, or nullptr if the dex
// file does not have one.
- const HiddenapiClassData* hiddenapi_class_data_;
+ const dex::HiddenapiClassData* hiddenapi_class_data_;
// If this dex file was loaded from an oat file, oat_dex_file_ contains a
// pointer to the OatDexFile it was loaded from. Otherwise oat_dex_file_ is
@@ -1135,7 +887,7 @@
// Iterate over a dex file's ProtoId's paramters
class DexFileParameterIterator {
public:
- DexFileParameterIterator(const DexFile& dex_file, const DexFile::ProtoId& proto_id)
+ DexFileParameterIterator(const DexFile& dex_file, const dex::ProtoId& proto_id)
: dex_file_(dex_file) {
type_list_ = dex_file_.GetProtoParameters(proto_id);
if (type_list_ != nullptr) {
@@ -1153,44 +905,12 @@
}
private:
const DexFile& dex_file_;
- const DexFile::TypeList* type_list_ = nullptr;
+ const dex::TypeList* type_list_ = nullptr;
uint32_t size_ = 0;
uint32_t pos_ = 0;
DISALLOW_IMPLICIT_CONSTRUCTORS(DexFileParameterIterator);
};
-// Abstract the signature of a method.
-class Signature : public ValueObject {
- public:
- std::string ToString() const;
-
- static Signature NoSignature() {
- return Signature();
- }
-
- bool IsVoid() const;
- uint32_t GetNumberOfParameters() const;
-
- bool operator==(const Signature& rhs) const;
- bool operator!=(const Signature& rhs) const {
- return !(*this == rhs);
- }
-
- bool operator==(const StringPiece& rhs) const;
-
- private:
- Signature(const DexFile* dex, const DexFile::ProtoId& proto) : dex_file_(dex), proto_id_(&proto) {
- }
-
- Signature() = default;
-
- friend class DexFile;
-
- const DexFile* const dex_file_ = nullptr;
- const DexFile::ProtoId* const proto_id_ = nullptr;
-};
-std::ostream& operator<<(std::ostream& os, const Signature& sig);
-
class EncodedArrayValueIterator {
public:
EncodedArrayValueIterator(const DexFile& dex_file, const uint8_t* array_data);
@@ -1242,7 +962,7 @@
class EncodedStaticFieldValueIterator : public EncodedArrayValueIterator {
public:
EncodedStaticFieldValueIterator(const DexFile& dex_file,
- const DexFile::ClassDef& class_def)
+ const dex::ClassDef& class_def)
: EncodedArrayValueIterator(dex_file,
dex_file.GetEncodedStaticFieldValuesArray(class_def))
{}
@@ -1255,7 +975,7 @@
class CallSiteArrayValueIterator : public EncodedArrayValueIterator {
public:
CallSiteArrayValueIterator(const DexFile& dex_file,
- const DexFile::CallSiteIdItem& call_site_id)
+ const dex::CallSiteIdItem& call_site_id)
: EncodedArrayValueIterator(dex_file,
dex_file.GetCallSiteEncodedValuesArray(call_site_id))
{}
diff --git a/libdexfile/dex/dex_file_exception_helpers.cc b/libdexfile/dex/dex_file_exception_helpers.cc
index 8e597fd..72b2554 100644
--- a/libdexfile/dex/dex_file_exception_helpers.cc
+++ b/libdexfile/dex/dex_file_exception_helpers.cc
@@ -17,6 +17,7 @@
#include "dex_file_exception_helpers.h"
#include "code_item_accessors-inl.h"
+#include "dex_file_structs.h"
namespace art {
@@ -29,7 +30,7 @@
case 0:
break;
case 1: {
- const DexFile::TryItem* tries = accessor.TryItems().begin();
+ const dex::TryItem* tries = accessor.TryItems().begin();
uint32_t start = tries->start_addr_;
if (address >= start) {
uint32_t end = start + tries->insn_count_;
@@ -40,7 +41,7 @@
break;
}
default: {
- const DexFile::TryItem* try_item = accessor.FindTryItem(address);
+ const dex::TryItem* try_item = accessor.FindTryItem(address);
offset = try_item != nullptr ? try_item->handler_off_ : -1;
break;
}
@@ -49,7 +50,7 @@
}
CatchHandlerIterator::CatchHandlerIterator(const CodeItemDataAccessor& accessor,
- const DexFile::TryItem& try_item) {
+ const dex::TryItem& try_item) {
handler_.address_ = -1;
Init(accessor, try_item.handler_off_);
}
diff --git a/libdexfile/dex/dex_file_exception_helpers.h b/libdexfile/dex/dex_file_exception_helpers.h
index a05fd68..08127c8 100644
--- a/libdexfile/dex/dex_file_exception_helpers.h
+++ b/libdexfile/dex/dex_file_exception_helpers.h
@@ -17,17 +17,23 @@
#ifndef ART_LIBDEXFILE_DEX_DEX_FILE_EXCEPTION_HELPERS_H_
#define ART_LIBDEXFILE_DEX_DEX_FILE_EXCEPTION_HELPERS_H_
-#include "dex_file.h"
+#include <android-base/logging.h>
+
+#include "dex_file_types.h"
namespace art {
+namespace dex {
+struct TryItem;
+} // namespace dex
+
class CodeItemDataAccessor;
class CatchHandlerIterator {
public:
CatchHandlerIterator(const CodeItemDataAccessor& accessor, uint32_t address);
- CatchHandlerIterator(const CodeItemDataAccessor& accessor, const DexFile::TryItem& try_item);
+ CatchHandlerIterator(const CodeItemDataAccessor& accessor, const dex::TryItem& try_item);
explicit CatchHandlerIterator(const uint8_t* handler_data) {
Init(handler_data);
diff --git a/libdexfile/dex/dex_file_loader_test.cc b/libdexfile/dex/dex_file_loader_test.cc
index 9c61d1a..8b7ca17 100644
--- a/libdexfile/dex/dex_file_loader_test.cc
+++ b/libdexfile/dex/dex_file_loader_test.cc
@@ -487,10 +487,9 @@
0xf25f2b38U,
true,
&dex_bytes);
- const DexFile::ClassDef& class_def = raw->GetClassDef(0);
+ const dex::ClassDef& class_def = raw->GetClassDef(0);
constexpr uint32_t kMethodIdx = 1;
- const DexFile::CodeItem* code_item = raw->GetCodeItem(raw->FindCodeItemOffset(class_def,
- kMethodIdx));
+ const dex::CodeItem* code_item = raw->GetCodeItem(raw->FindCodeItemOffset(class_def, kMethodIdx));
CodeItemDebugInfoAccessor accessor(*raw, code_item, kMethodIdx);
ASSERT_TRUE(accessor.DecodeDebugLocalInfo(true, 1, VoidFunctor()));
}
diff --git a/libdexfile/dex/dex_file_structs.h b/libdexfile/dex/dex_file_structs.h
new file mode 100644
index 0000000..2d252270
--- /dev/null
+++ b/libdexfile/dex/dex_file_structs.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBDEXFILE_DEX_DEX_FILE_STRUCTS_H_
+#define ART_LIBDEXFILE_DEX_DEX_FILE_STRUCTS_H_
+
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+
+#include <inttypes.h>
+
+#include "dex_file_types.h"
+#include "modifiers.h"
+
+namespace art {
+
+class DexWriter;
+
+namespace dex {
+
+struct MapItem {
+ uint16_t type_;
+ uint16_t unused_;
+ uint32_t size_;
+ uint32_t offset_;
+};
+
+struct MapList {
+ uint32_t size_;
+ MapItem list_[1];
+
+ size_t Size() const { return sizeof(uint32_t) + (size_ * sizeof(MapItem)); }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MapList);
+};
+
+// Raw string_id_item.
+struct StringId {
+ uint32_t string_data_off_; // offset in bytes from the base address
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringId);
+};
+
+// Raw type_id_item.
+struct TypeId {
+ dex::StringIndex descriptor_idx_; // index into string_ids
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TypeId);
+};
+
+// Raw field_id_item.
+struct FieldId {
+ dex::TypeIndex class_idx_; // index into type_ids_ array for defining class
+ dex::TypeIndex type_idx_; // index into type_ids_ array for field type
+ dex::StringIndex name_idx_; // index into string_ids_ array for field name
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldId);
+};
+
+// Raw proto_id_item.
+struct ProtoId {
+ dex::StringIndex shorty_idx_; // index into string_ids array for shorty descriptor
+ dex::TypeIndex return_type_idx_; // index into type_ids array for return type
+ uint16_t pad_; // padding = 0
+ uint32_t parameters_off_; // file offset to type_list for parameter types
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ProtoId);
+};
+
+// Raw method_id_item.
+struct MethodId {
+ dex::TypeIndex class_idx_; // index into type_ids_ array for defining class
+ dex::ProtoIndex proto_idx_; // index into proto_ids_ array for method prototype
+ dex::StringIndex name_idx_; // index into string_ids_ array for method name
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MethodId);
+};
+
+// Base code_item, compact dex and standard dex have different code item layouts.
+struct CodeItem {
+ protected:
+ CodeItem() = default;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CodeItem);
+};
+
+// Raw class_def_item.
+struct ClassDef {
+ dex::TypeIndex class_idx_; // index into type_ids_ array for this class
+ uint16_t pad1_; // padding = 0
+ uint32_t access_flags_;
+ dex::TypeIndex superclass_idx_; // index into type_ids_ array for superclass
+ uint16_t pad2_; // padding = 0
+ uint32_t interfaces_off_; // file offset to TypeList
+ dex::StringIndex source_file_idx_; // index into string_ids_ for source file name
+ uint32_t annotations_off_; // file offset to annotations_directory_item
+ uint32_t class_data_off_; // file offset to class_data_item
+ uint32_t static_values_off_; // file offset to EncodedArray
+
+ // Returns the valid access flags, that is, Java modifier bits relevant to the ClassDef type
+ // (class or interface). These are all in the lower 16b and do not contain runtime flags.
+ uint32_t GetJavaAccessFlags() const {
+ // Make sure that none of our runtime-only flags are set.
+ static_assert((kAccValidClassFlags & kAccJavaFlagsMask) == kAccValidClassFlags,
+ "Valid class flags not a subset of Java flags");
+ static_assert((kAccValidInterfaceFlags & kAccJavaFlagsMask) == kAccValidInterfaceFlags,
+ "Valid interface flags not a subset of Java flags");
+
+ if ((access_flags_ & kAccInterface) != 0) {
+ // Interface.
+ return access_flags_ & kAccValidInterfaceFlags;
+ } else {
+ // Class.
+ return access_flags_ & kAccValidClassFlags;
+ }
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ClassDef);
+};
+
+// Raw type_item.
+struct TypeItem {
+ dex::TypeIndex type_idx_; // index into type_ids section
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TypeItem);
+};
+
+// Raw type_list.
+class TypeList {
+ public:
+ uint32_t Size() const {
+ return size_;
+ }
+
+ const TypeItem& GetTypeItem(uint32_t idx) const {
+ DCHECK_LT(idx, this->size_);
+ return this->list_[idx];
+ }
+
+ // Size in bytes of the part of the list that is common.
+ static constexpr size_t GetHeaderSize() {
+ return 4U;
+ }
+
+ // Size in bytes of the whole type list including all the stored elements.
+ static constexpr size_t GetListSize(size_t count) {
+ return GetHeaderSize() + sizeof(TypeItem) * count;
+ }
+
+ private:
+ uint32_t size_; // size of the list, in entries
+ TypeItem list_[1]; // elements of the list
+ DISALLOW_COPY_AND_ASSIGN(TypeList);
+};
+
+// raw method_handle_item
+struct MethodHandleItem {
+ uint16_t method_handle_type_;
+ uint16_t reserved1_; // Reserved for future use.
+ uint16_t field_or_method_idx_; // Field index for accessors, method index otherwise.
+ uint16_t reserved2_; // Reserved for future use.
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MethodHandleItem);
+};
+
+// raw call_site_id_item
+struct CallSiteIdItem {
+ uint32_t data_off_; // Offset into data section pointing to encoded array items.
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CallSiteIdItem);
+};
+
+// Raw try_item.
+struct TryItem {
+ static constexpr size_t kAlignment = sizeof(uint32_t);
+
+ uint32_t start_addr_;
+ uint16_t insn_count_;
+ uint16_t handler_off_;
+
+ private:
+ TryItem() = default;
+ friend class ::art::DexWriter;
+ DISALLOW_COPY_AND_ASSIGN(TryItem);
+};
+
+struct AnnotationsDirectoryItem {
+ uint32_t class_annotations_off_;
+ uint32_t fields_size_;
+ uint32_t methods_size_;
+ uint32_t parameters_size_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AnnotationsDirectoryItem);
+};
+
+struct FieldAnnotationsItem {
+ uint32_t field_idx_;
+ uint32_t annotations_off_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAnnotationsItem);
+};
+
+struct MethodAnnotationsItem {
+ uint32_t method_idx_;
+ uint32_t annotations_off_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MethodAnnotationsItem);
+};
+
+struct ParameterAnnotationsItem {
+ uint32_t method_idx_;
+ uint32_t annotations_off_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ParameterAnnotationsItem);
+};
+
+struct AnnotationSetRefItem {
+ uint32_t annotations_off_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AnnotationSetRefItem);
+};
+
+struct AnnotationSetRefList {
+ uint32_t size_;
+ AnnotationSetRefItem list_[1];
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AnnotationSetRefList);
+};
+
+struct AnnotationSetItem {
+ uint32_t size_;
+ uint32_t entries_[1];
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AnnotationSetItem);
+};
+
+struct AnnotationItem {
+ uint8_t visibility_;
+ uint8_t annotation_[1];
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AnnotationItem);
+};
+
+struct HiddenapiClassData {
+ uint32_t size_; // total size of the item
+ uint32_t flags_offset_[1]; // array of offsets from the beginning of this item,
+ // indexed by class def index
+
+ // Returns a pointer to the beginning of a uleb128-stream of hiddenapi
+ // flags for a class def of given index. Values are in the same order
+ // as fields/methods in the class data. Returns null if the class does
+ // not have class data.
+ const uint8_t* GetFlagsPointer(uint32_t class_def_idx) const {
+ if (flags_offset_[class_def_idx] == 0) {
+ return nullptr;
+ } else {
+ return reinterpret_cast<const uint8_t*>(this) + flags_offset_[class_def_idx];
+ }
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HiddenapiClassData);
+};
+
+} // namespace dex
+} // namespace art
+
+#endif // ART_LIBDEXFILE_DEX_DEX_FILE_STRUCTS_H_
diff --git a/libdexfile/dex/dex_file_tracking_registrar.cc b/libdexfile/dex/dex_file_tracking_registrar.cc
index 29ff6be..1903dc9 100644
--- a/libdexfile/dex/dex_file_tracking_registrar.cc
+++ b/libdexfile/dex/dex_file_tracking_registrar.cc
@@ -158,7 +158,7 @@
void DexFileTrackingRegistrar::SetAllCodeItemRegistration(bool should_poison) {
for (ClassAccessor accessor : dex_file_->GetClasses()) {
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
- const DexFile::CodeItem* code_item = method.GetCodeItem();
+ const dex::CodeItem* code_item = method.GetCodeItem();
if (code_item != nullptr) {
const void* code_item_begin = reinterpret_cast<const void*>(code_item);
size_t code_item_size = dex_file_->GetCodeItemSize(*code_item);
@@ -171,7 +171,7 @@
void DexFileTrackingRegistrar::SetAllCodeItemStartRegistration(bool should_poison) {
for (ClassAccessor class_accessor : dex_file_->GetClasses()) {
for (const ClassAccessor::Method& method : class_accessor.GetMethods()) {
- const DexFile::CodeItem* code_item = method.GetCodeItem();
+ const dex::CodeItem* code_item = method.GetCodeItem();
if (code_item != nullptr) {
const void* code_item_begin = reinterpret_cast<const void*>(code_item);
size_t code_item_start = reinterpret_cast<size_t>(code_item);
@@ -189,7 +189,7 @@
void DexFileTrackingRegistrar::SetAllInsnsRegistration(bool should_poison) {
for (ClassAccessor class_accessor : dex_file_->GetClasses()) {
for (const ClassAccessor::Method& method : class_accessor.GetMethods()) {
- const DexFile::CodeItem* code_item = method.GetCodeItem();
+ const dex::CodeItem* code_item = method.GetCodeItem();
if (code_item != nullptr) {
CodeItemInstructionAccessor accessor(*dex_file_, code_item);
const void* insns_begin = reinterpret_cast<const void*>(accessor.Insns());
@@ -204,9 +204,9 @@
void DexFileTrackingRegistrar::SetCodeItemRegistration(const char* class_name, bool should_poison) {
for (ClassAccessor accessor : dex_file_->GetClasses()) {
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
- const DexFile::MethodId& methodid_item = dex_file_->GetMethodId(method.GetIndex());
+ const dex::MethodId& methodid_item = dex_file_->GetMethodId(method.GetIndex());
const char * methodid_name = dex_file_->GetMethodName(methodid_item);
- const DexFile::CodeItem* code_item = method.GetCodeItem();
+ const dex::CodeItem* code_item = method.GetCodeItem();
if (code_item != nullptr && strcmp(methodid_name, class_name) == 0) {
const void* code_item_begin = reinterpret_cast<const void*>(code_item);
size_t code_item_size = dex_file_->GetCodeItemSize(*code_item);
@@ -218,7 +218,7 @@
void DexFileTrackingRegistrar::SetAllStringDataStartRegistration(bool should_poison) {
for (size_t stringid_ctr = 0; stringid_ctr < dex_file_->NumStringIds(); ++stringid_ctr) {
- const DexFile::StringId & string_id = dex_file_->GetStringId(StringIndex(stringid_ctr));
+ const dex::StringId & string_id = dex_file_->GetStringId(StringIndex(stringid_ctr));
const void* string_data_begin = reinterpret_cast<const void*>(dex_file_->Begin() + string_id.string_data_off_);
// Data Section of String Data Item
const void* string_data_data_begin = reinterpret_cast<const void*>(dex_file_->GetStringData(string_id));
@@ -229,11 +229,11 @@
void DexFileTrackingRegistrar::SetAllStringDataRegistration(bool should_poison) {
size_t map_offset = dex_file_->GetHeader().map_off_;
- auto map_list = reinterpret_cast<const DexFile::MapList*>(dex_file_->Begin() + map_offset);
+ auto map_list = reinterpret_cast<const dex::MapList*>(dex_file_->Begin() + map_offset);
for (size_t map_ctr = 0; map_ctr < map_list->size_; ++map_ctr) {
- const DexFile::MapItem& map_item = map_list->list_[map_ctr];
+ const dex::MapItem& map_item = map_list->list_[map_ctr];
if (map_item.type_ == DexFile::kDexTypeStringDataItem) {
- const DexFile::MapItem& next_map_item = map_list->list_[map_ctr + 1];
+ const dex::MapItem& next_map_item = map_list->list_[map_ctr + 1];
const void* string_data_begin = reinterpret_cast<const void*>(dex_file_->Begin() + map_item.offset_);
size_t string_data_size = next_map_item.offset_ - map_item.offset_;
range_values_.push_back(std::make_tuple(string_data_begin, string_data_size, should_poison));
diff --git a/libdexfile/dex/dex_file_verifier.cc b/libdexfile/dex/dex_file_verifier.cc
index 78e4618..f376c4d 100644
--- a/libdexfile/dex/dex_file_verifier.cc
+++ b/libdexfile/dex/dex_file_verifier.cc
@@ -116,22 +116,22 @@
return CheckLoadStringByIdx(dex_file_->GetTypeId(type_idx).descriptor_idx_, error_string);
}
-const DexFile::FieldId* DexFileVerifier::CheckLoadFieldId(uint32_t idx, const char* error_string) {
+const dex::FieldId* DexFileVerifier::CheckLoadFieldId(uint32_t idx, const char* error_string) {
if (UNLIKELY(!CheckIndex(idx, dex_file_->NumFieldIds(), error_string))) {
return nullptr;
}
return &dex_file_->GetFieldId(idx);
}
-const DexFile::MethodId* DexFileVerifier::CheckLoadMethodId(uint32_t idx, const char* err_string) {
+const dex::MethodId* DexFileVerifier::CheckLoadMethodId(uint32_t idx, const char* err_string) {
if (UNLIKELY(!CheckIndex(idx, dex_file_->NumMethodIds(), err_string))) {
return nullptr;
}
return &dex_file_->GetMethodId(idx);
}
-const DexFile::ProtoId* DexFileVerifier::CheckLoadProtoId(dex::ProtoIndex idx,
- const char* err_string) {
+const dex::ProtoId* DexFileVerifier::CheckLoadProtoId(dex::ProtoIndex idx,
+ const char* err_string) {
if (UNLIKELY(!CheckIndex(idx.index_, dex_file_->NumProtoIds(), err_string))) {
return nullptr;
}
@@ -154,14 +154,14 @@
// Helper macro to load method id. Return last parameter on error.
#define LOAD_METHOD(var, idx, error_string, error_stmt) \
- const DexFile::MethodId* (var) = CheckLoadMethodId(idx, error_string); \
+ const dex::MethodId* (var) = CheckLoadMethodId(idx, error_string); \
if (UNLIKELY((var) == nullptr)) { \
error_stmt; \
}
// Helper macro to load method id. Return last parameter on error.
#define LOAD_FIELD(var, idx, fmt, error_stmt) \
- const DexFile::FieldId* (var) = CheckLoadFieldId(idx, fmt); \
+ const dex::FieldId* (var) = CheckLoadFieldId(idx, fmt); \
if (UNLIKELY((var) == nullptr)) { \
error_stmt; \
}
@@ -385,14 +385,13 @@
}
bool DexFileVerifier::CheckMap() {
- const DexFile::MapList* map = reinterpret_cast<const DexFile::MapList*>(begin_ +
- header_->map_off_);
+ const dex::MapList* map = reinterpret_cast<const dex::MapList*>(begin_ + header_->map_off_);
// Check that map list content is available.
- if (!CheckListSize(map, 1, sizeof(DexFile::MapList), "maplist content")) {
+ if (!CheckListSize(map, 1, sizeof(dex::MapList), "maplist content")) {
return false;
}
- const DexFile::MapItem* item = map->list_;
+ const dex::MapItem* item = map->list_;
uint32_t count = map->size_;
uint32_t last_offset = 0;
@@ -402,7 +401,7 @@
uint32_t used_bits = 0;
// Sanity check the size of the map list.
- if (!CheckListSize(item, count, sizeof(DexFile::MapItem), "map size")) {
+ if (!CheckListSize(item, count, sizeof(dex::MapItem), "map size")) {
return false;
}
@@ -526,8 +525,9 @@
return false; \
}
-bool DexFileVerifier::CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_item,
- uint32_t* handler_offsets, uint32_t handlers_size) {
+bool DexFileVerifier::CheckAndGetHandlerOffsets(const dex::CodeItem* code_item,
+ uint32_t* handler_offsets,
+ uint32_t handlers_size) {
CodeItemDataAccessor accessor(*dex_file_, code_item);
const uint8_t* handlers_base = accessor.GetCatchHandlerData();
@@ -587,8 +587,7 @@
// Check that it's the right class.
dex::TypeIndex my_class_index =
- (reinterpret_cast<const DexFile::FieldId*>(begin_ + header_->field_ids_off_) + idx)->
- class_idx_;
+ (reinterpret_cast<const dex::FieldId*>(begin_ + header_->field_ids_off_) + idx)->class_idx_;
if (class_type_index != my_class_index) {
ErrorStringPrintf("Field's class index unexpected, %" PRIu16 "vs %" PRIu16,
my_class_index.index_,
@@ -625,8 +624,8 @@
return false;
}
- const DexFile::MethodId& method_id =
- *(reinterpret_cast<const DexFile::MethodId*>(begin_ + header_->method_ids_off_) + idx);
+ const dex::MethodId& method_id =
+ *(reinterpret_cast<const dex::MethodId*>(begin_ + header_->method_ids_off_) + idx);
// Check that it's the right class.
dex::TypeIndex my_class_index = method_id.class_idx_;
@@ -911,7 +910,7 @@
bool DexFileVerifier::FindClassIndexAndDef(uint32_t index,
bool is_field,
dex::TypeIndex* class_type_index,
- const DexFile::ClassDef** output_class_def) {
+ const dex::ClassDef** output_class_def) {
DCHECK(class_type_index != nullptr);
DCHECK(output_class_def != nullptr);
@@ -923,11 +922,11 @@
// Next get the type index.
if (is_field) {
*class_type_index =
- (reinterpret_cast<const DexFile::FieldId*>(begin_ + header_->field_ids_off_) + index)->
+ (reinterpret_cast<const dex::FieldId*>(begin_ + header_->field_ids_off_) + index)->
class_idx_;
} else {
*class_type_index =
- (reinterpret_cast<const DexFile::MethodId*>(begin_ + header_->method_ids_off_) + index)->
+ (reinterpret_cast<const dex::MethodId*>(begin_ + header_->method_ids_off_) + index)->
class_idx_;
}
@@ -938,10 +937,10 @@
// Now search for the class def. This is basically a specialized version of the DexFile code, as
// we should not trust that this is a valid DexFile just yet.
- const DexFile::ClassDef* class_def_begin =
- reinterpret_cast<const DexFile::ClassDef*>(begin_ + header_->class_defs_off_);
+ const dex::ClassDef* class_def_begin =
+ reinterpret_cast<const dex::ClassDef*>(begin_ + header_->class_defs_off_);
for (size_t i = 0; i < header_->class_defs_size_; ++i) {
- const DexFile::ClassDef* class_def = class_def_begin + i;
+ const dex::ClassDef* class_def = class_def_begin + i;
if (class_def->class_idx_ == *class_type_index) {
*output_class_def = class_def;
return true;
@@ -965,7 +964,7 @@
return true;
}
-bool DexFileVerifier::CheckStaticFieldTypes(const DexFile::ClassDef* class_def) {
+bool DexFileVerifier::CheckStaticFieldTypes(const dex::ClassDef* class_def) {
if (class_def == nullptr) {
return true;
}
@@ -978,7 +977,7 @@
break;
}
uint32_t index = field.GetIndex();
- const DexFile::TypeId& type_id = dex_file_->GetTypeId(dex_file_->GetFieldId(index).type_idx_);
+ const dex::TypeId& type_id = dex_file_->GetTypeId(dex_file_->GetFieldId(index).type_idx_);
const char* field_type_name =
dex_file_->GetStringData(dex_file_->GetStringId(type_id.descriptor_idx_));
Primitive::Type field_type = Primitive::GetType(field_type_name[0]);
@@ -1069,7 +1068,7 @@
ClassAccessor::Field* field,
bool* have_class,
dex::TypeIndex* class_type_index,
- const DexFile::ClassDef** class_def) {
+ const dex::ClassDef** class_def) {
DCHECK(field != nullptr);
constexpr const char* kTypeDescr = kStatic ? "static field" : "instance field";
@@ -1121,7 +1120,7 @@
size_t num_directs,
bool* have_class,
dex::TypeIndex* class_type_index,
- const DexFile::ClassDef** class_def) {
+ const dex::ClassDef** class_def) {
DCHECK(method != nullptr);
const char* kTypeDescr = method->IsStaticOrDirect() ? "direct method" : "virtual method";
@@ -1176,7 +1175,7 @@
// as the lookup is expensive, cache the result.
bool have_class = false;
dex::TypeIndex class_type_index;
- const DexFile::ClassDef* class_def = nullptr;
+ const dex::ClassDef* class_def = nullptr;
ClassAccessor::Field field(*dex_file_, accessor.ptr_pos_);
// Check fields.
@@ -1232,8 +1231,8 @@
}
bool DexFileVerifier::CheckIntraCodeItem() {
- const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(ptr_);
- if (!CheckListSize(code_item, 1, sizeof(DexFile::CodeItem), "code")) {
+ const dex::CodeItem* code_item = reinterpret_cast<const dex::CodeItem*>(ptr_);
+ if (!CheckListSize(code_item, 1, sizeof(dex::CodeItem), "code")) {
return false;
}
@@ -1275,8 +1274,8 @@
return false;
}
- const DexFile::TryItem* try_items = accessor.TryItems().begin();
- if (!CheckListSize(try_items, try_items_size, sizeof(DexFile::TryItem), "try_items size")) {
+ const dex::TryItem* try_items = accessor.TryItems().begin();
+ if (!CheckListSize(try_items, try_items_size, sizeof(dex::TryItem), "try_items size")) {
return false;
}
@@ -1558,8 +1557,7 @@
}
bool DexFileVerifier::CheckIntraHiddenapiClassData() {
- const DexFile::HiddenapiClassData* item =
- reinterpret_cast<const DexFile::HiddenapiClassData*>(ptr_);
+ const dex::HiddenapiClassData* item = reinterpret_cast<const dex::HiddenapiClassData*>(ptr_);
// Check expected header size.
uint32_t num_header_elems = dex_file_->NumClassDefs() + 1;
@@ -1586,7 +1584,7 @@
// Check offsets for each class def.
for (uint32_t i = 0; i < dex_file_->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex_file_->GetClassDef(i);
+ const dex::ClassDef& class_def = dex_file_->GetClassDef(i);
const uint8_t* class_data = dex_file_->GetClassData(class_def);
uint32_t offset = item->flags_offset_[i];
@@ -1659,24 +1657,28 @@
}
bool DexFileVerifier::CheckIntraAnnotationsDirectoryItem() {
- const DexFile::AnnotationsDirectoryItem* item =
- reinterpret_cast<const DexFile::AnnotationsDirectoryItem*>(ptr_);
- if (!CheckListSize(item, 1, sizeof(DexFile::AnnotationsDirectoryItem), "annotations_directory")) {
+ const dex::AnnotationsDirectoryItem* item =
+ reinterpret_cast<const dex::AnnotationsDirectoryItem*>(ptr_);
+ if (!CheckListSize(item, 1, sizeof(dex::AnnotationsDirectoryItem), "annotations_directory")) {
return false;
}
// Field annotations follow immediately after the annotations directory.
- const DexFile::FieldAnnotationsItem* field_item =
- reinterpret_cast<const DexFile::FieldAnnotationsItem*>(item + 1);
+ const dex::FieldAnnotationsItem* field_item =
+ reinterpret_cast<const dex::FieldAnnotationsItem*>(item + 1);
uint32_t field_count = item->fields_size_;
- if (!CheckListSize(field_item, field_count, sizeof(DexFile::FieldAnnotationsItem), "field_annotations list")) {
+ if (!CheckListSize(field_item,
+ field_count,
+ sizeof(dex::FieldAnnotationsItem),
+ "field_annotations list")) {
return false;
}
uint32_t last_idx = 0;
for (uint32_t i = 0; i < field_count; i++) {
if (UNLIKELY(last_idx >= field_item->field_idx_ && i != 0)) {
- ErrorStringPrintf("Out-of-order field_idx for annotation: %x then %x", last_idx, field_item->field_idx_);
+ ErrorStringPrintf("Out-of-order field_idx for annotation: %x then %x",
+ last_idx, field_item->field_idx_);
return false;
}
last_idx = field_item->field_idx_;
@@ -1684,10 +1686,13 @@
}
// Method annotations follow immediately after field annotations.
- const DexFile::MethodAnnotationsItem* method_item =
- reinterpret_cast<const DexFile::MethodAnnotationsItem*>(field_item);
+ const dex::MethodAnnotationsItem* method_item =
+ reinterpret_cast<const dex::MethodAnnotationsItem*>(field_item);
uint32_t method_count = item->methods_size_;
- if (!CheckListSize(method_item, method_count, sizeof(DexFile::MethodAnnotationsItem), "method_annotations list")) {
+ if (!CheckListSize(method_item,
+ method_count,
+ sizeof(dex::MethodAnnotationsItem),
+ "method_annotations list")) {
return false;
}
@@ -1703,10 +1708,10 @@
}
// Parameter annotations follow immediately after method annotations.
- const DexFile::ParameterAnnotationsItem* parameter_item =
- reinterpret_cast<const DexFile::ParameterAnnotationsItem*>(method_item);
+ const dex::ParameterAnnotationsItem* parameter_item =
+ reinterpret_cast<const dex::ParameterAnnotationsItem*>(method_item);
uint32_t parameter_count = item->parameters_size_;
- if (!CheckListSize(parameter_item, parameter_count, sizeof(DexFile::ParameterAnnotationsItem),
+ if (!CheckListSize(parameter_item, parameter_count, sizeof(dex::ParameterAnnotationsItem),
"parameter_annotations list")) {
return false;
}
@@ -1757,69 +1762,69 @@
const uint8_t* start_ptr = ptr_;
switch (kType) {
case DexFile::kDexTypeStringIdItem: {
- if (!CheckListSize(ptr_, 1, sizeof(DexFile::StringId), "string_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(dex::StringId), "string_ids")) {
return false;
}
- ptr_ += sizeof(DexFile::StringId);
+ ptr_ += sizeof(dex::StringId);
break;
}
case DexFile::kDexTypeTypeIdItem: {
- if (!CheckListSize(ptr_, 1, sizeof(DexFile::TypeId), "type_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(dex::TypeId), "type_ids")) {
return false;
}
- ptr_ += sizeof(DexFile::TypeId);
+ ptr_ += sizeof(dex::TypeId);
break;
}
case DexFile::kDexTypeProtoIdItem: {
- if (!CheckListSize(ptr_, 1, sizeof(DexFile::ProtoId), "proto_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(dex::ProtoId), "proto_ids")) {
return false;
}
- ptr_ += sizeof(DexFile::ProtoId);
+ ptr_ += sizeof(dex::ProtoId);
break;
}
case DexFile::kDexTypeFieldIdItem: {
- if (!CheckListSize(ptr_, 1, sizeof(DexFile::FieldId), "field_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(dex::FieldId), "field_ids")) {
return false;
}
- ptr_ += sizeof(DexFile::FieldId);
+ ptr_ += sizeof(dex::FieldId);
break;
}
case DexFile::kDexTypeMethodIdItem: {
- if (!CheckListSize(ptr_, 1, sizeof(DexFile::MethodId), "method_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(dex::MethodId), "method_ids")) {
return false;
}
- ptr_ += sizeof(DexFile::MethodId);
+ ptr_ += sizeof(dex::MethodId);
break;
}
case DexFile::kDexTypeClassDefItem: {
- if (!CheckListSize(ptr_, 1, sizeof(DexFile::ClassDef), "class_defs")) {
+ if (!CheckListSize(ptr_, 1, sizeof(dex::ClassDef), "class_defs")) {
return false;
}
- ptr_ += sizeof(DexFile::ClassDef);
+ ptr_ += sizeof(dex::ClassDef);
break;
}
case DexFile::kDexTypeCallSiteIdItem: {
- if (!CheckListSize(ptr_, 1, sizeof(DexFile::CallSiteIdItem), "call_site_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(dex::CallSiteIdItem), "call_site_ids")) {
return false;
}
- ptr_ += sizeof(DexFile::CallSiteIdItem);
+ ptr_ += sizeof(dex::CallSiteIdItem);
break;
}
case DexFile::kDexTypeMethodHandleItem: {
- if (!CheckListSize(ptr_, 1, sizeof(DexFile::MethodHandleItem), "method_handles")) {
+ if (!CheckListSize(ptr_, 1, sizeof(dex::MethodHandleItem), "method_handles")) {
return false;
}
- ptr_ += sizeof(DexFile::MethodHandleItem);
+ ptr_ += sizeof(dex::MethodHandleItem);
break;
}
case DexFile::kDexTypeTypeList: {
- if (!CheckList(sizeof(DexFile::TypeItem), "type_list", &ptr_)) {
+ if (!CheckList(sizeof(dex::TypeItem), "type_list", &ptr_)) {
return false;
}
break;
}
case DexFile::kDexTypeAnnotationSetRefList: {
- if (!CheckList(sizeof(DexFile::AnnotationSetRefItem), "annotation_set_ref_list", &ptr_)) {
+ if (!CheckList(sizeof(dex::AnnotationSetRefItem), "annotation_set_ref_list", &ptr_)) {
return false;
}
break;
@@ -1986,9 +1991,8 @@
}
bool DexFileVerifier::CheckIntraSection() {
- const DexFile::MapList* map =
- reinterpret_cast<const DexFile::MapList*>(begin_ + header_->map_off_);
- const DexFile::MapItem* item = map->list_;
+ const dex::MapList* map = reinterpret_cast<const dex::MapList*>(begin_ + header_->map_off_);
+ const dex::MapItem* item = map->list_;
size_t offset = 0;
uint32_t count = map->size_;
ptr_ = begin_;
@@ -2052,8 +2056,8 @@
section_offset, header_->map_off_);
return false;
}
- ptr_ += sizeof(uint32_t) + (map->size_ * sizeof(DexFile::MapItem));
- offset = section_offset + sizeof(uint32_t) + (map->size_ * sizeof(DexFile::MapItem));
+ ptr_ += sizeof(uint32_t) + (map->size_ * sizeof(dex::MapItem));
+ offset = section_offset + sizeof(uint32_t) + (map->size_ * sizeof(dex::MapItem));
break;
#define CHECK_INTRA_SECTION_ITERATE_CASE(type) \
@@ -2137,26 +2141,26 @@
dex::TypeIndex DexFileVerifier::FindFirstAnnotationsDirectoryDefiner(const uint8_t* ptr,
bool* success) {
- const DexFile::AnnotationsDirectoryItem* item =
- reinterpret_cast<const DexFile::AnnotationsDirectoryItem*>(ptr);
+ const dex::AnnotationsDirectoryItem* item =
+ reinterpret_cast<const dex::AnnotationsDirectoryItem*>(ptr);
*success = true;
if (item->fields_size_ != 0) {
- DexFile::FieldAnnotationsItem* field_items = (DexFile::FieldAnnotationsItem*) (item + 1);
+ dex::FieldAnnotationsItem* field_items = (dex::FieldAnnotationsItem*) (item + 1);
LOAD_FIELD(field, field_items[0].field_idx_, "first_annotations_dir_definer field_id",
*success = false; return dex::TypeIndex(DexFile::kDexNoIndex16))
return field->class_idx_;
}
if (item->methods_size_ != 0) {
- DexFile::MethodAnnotationsItem* method_items = (DexFile::MethodAnnotationsItem*) (item + 1);
+ dex::MethodAnnotationsItem* method_items = (dex::MethodAnnotationsItem*) (item + 1);
LOAD_METHOD(method, method_items[0].method_idx_, "first_annotations_dir_definer method id",
*success = false; return dex::TypeIndex(DexFile::kDexNoIndex16))
return method->class_idx_;
}
if (item->parameters_size_ != 0) {
- DexFile::ParameterAnnotationsItem* parameter_items = (DexFile::ParameterAnnotationsItem*) (item + 1);
+ dex::ParameterAnnotationsItem* parameter_items = (dex::ParameterAnnotationsItem*) (item + 1);
LOAD_METHOD(method, parameter_items[0].method_idx_, "first_annotations_dir_definer method id",
*success = false; return dex::TypeIndex(DexFile::kDexNoIndex16))
return method->class_idx_;
@@ -2166,7 +2170,7 @@
}
bool DexFileVerifier::CheckInterStringIdItem() {
- const DexFile::StringId* item = reinterpret_cast<const DexFile::StringId*>(ptr_);
+ const dex::StringId* item = reinterpret_cast<const dex::StringId*>(ptr_);
// Check the map to make sure it has the right offset->type.
if (!CheckOffsetToTypeMap(item->string_data_off_, DexFile::kDexTypeStringDataItem)) {
@@ -2175,7 +2179,7 @@
// Check ordering between items.
if (previous_item_ != nullptr) {
- const DexFile::StringId* prev_item = reinterpret_cast<const DexFile::StringId*>(previous_item_);
+ const dex::StringId* prev_item = reinterpret_cast<const dex::StringId*>(previous_item_);
const char* prev_str = dex_file_->GetStringData(*prev_item);
const char* str = dex_file_->GetStringData(*item);
if (UNLIKELY(CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(prev_str, str) >= 0)) {
@@ -2184,12 +2188,12 @@
}
}
- ptr_ += sizeof(DexFile::StringId);
+ ptr_ += sizeof(dex::StringId);
return true;
}
bool DexFileVerifier::CheckInterTypeIdItem() {
- const DexFile::TypeId* item = reinterpret_cast<const DexFile::TypeId*>(ptr_);
+ const dex::TypeId* item = reinterpret_cast<const dex::TypeId*>(ptr_);
LOAD_STRING(descriptor, item->descriptor_idx_, "inter_type_id_item descriptor_idx")
@@ -2201,7 +2205,7 @@
// Check ordering between items.
if (previous_item_ != nullptr) {
- const DexFile::TypeId* prev_item = reinterpret_cast<const DexFile::TypeId*>(previous_item_);
+ const dex::TypeId* prev_item = reinterpret_cast<const dex::TypeId*>(previous_item_);
if (UNLIKELY(prev_item->descriptor_idx_ >= item->descriptor_idx_)) {
ErrorStringPrintf("Out-of-order type_ids: %x then %x",
prev_item->descriptor_idx_.index_,
@@ -2210,12 +2214,12 @@
}
}
- ptr_ += sizeof(DexFile::TypeId);
+ ptr_ += sizeof(dex::TypeId);
return true;
}
bool DexFileVerifier::CheckInterProtoIdItem() {
- const DexFile::ProtoId* item = reinterpret_cast<const DexFile::ProtoId*>(ptr_);
+ const dex::ProtoId* item = reinterpret_cast<const dex::ProtoId*>(ptr_);
LOAD_STRING(shorty, item->shorty_idx_, "inter_proto_id_item shorty_idx")
@@ -2258,7 +2262,7 @@
// Check ordering between items. This relies on type_ids being in order.
if (previous_item_ != nullptr) {
- const DexFile::ProtoId* prev = reinterpret_cast<const DexFile::ProtoId*>(previous_item_);
+ const dex::ProtoId* prev = reinterpret_cast<const dex::ProtoId*>(previous_item_);
if (UNLIKELY(prev->return_type_idx_ > item->return_type_idx_)) {
ErrorStringPrintf("Out-of-order proto_id return types");
return false;
@@ -2291,12 +2295,12 @@
}
}
- ptr_ += sizeof(DexFile::ProtoId);
+ ptr_ += sizeof(dex::ProtoId);
return true;
}
bool DexFileVerifier::CheckInterFieldIdItem() {
- const DexFile::FieldId* item = reinterpret_cast<const DexFile::FieldId*>(ptr_);
+ const dex::FieldId* item = reinterpret_cast<const dex::FieldId*>(ptr_);
// Check that the class descriptor is valid.
LOAD_STRING_BY_TYPE(class_descriptor, item->class_idx_, "inter_field_id_item class_idx")
@@ -2321,7 +2325,7 @@
// Check ordering between items. This relies on the other sections being in order.
if (previous_item_ != nullptr) {
- const DexFile::FieldId* prev_item = reinterpret_cast<const DexFile::FieldId*>(previous_item_);
+ const dex::FieldId* prev_item = reinterpret_cast<const dex::FieldId*>(previous_item_);
if (UNLIKELY(prev_item->class_idx_ > item->class_idx_)) {
ErrorStringPrintf("Out-of-order field_ids");
return false;
@@ -2338,12 +2342,12 @@
}
}
- ptr_ += sizeof(DexFile::FieldId);
+ ptr_ += sizeof(dex::FieldId);
return true;
}
bool DexFileVerifier::CheckInterMethodIdItem() {
- const DexFile::MethodId* item = reinterpret_cast<const DexFile::MethodId*>(ptr_);
+ const dex::MethodId* item = reinterpret_cast<const dex::MethodId*>(ptr_);
// Check that the class descriptor is a valid reference name.
LOAD_STRING_BY_TYPE(class_descriptor, item->class_idx_, "inter_method_id_item class_idx")
@@ -2368,7 +2372,7 @@
// Check ordering between items. This relies on the other sections being in order.
if (previous_item_ != nullptr) {
- const DexFile::MethodId* prev_item = reinterpret_cast<const DexFile::MethodId*>(previous_item_);
+ const dex::MethodId* prev_item = reinterpret_cast<const dex::MethodId*>(previous_item_);
if (UNLIKELY(prev_item->class_idx_ > item->class_idx_)) {
ErrorStringPrintf("Out-of-order method_ids");
return false;
@@ -2385,12 +2389,12 @@
}
}
- ptr_ += sizeof(DexFile::MethodId);
+ ptr_ += sizeof(dex::MethodId);
return true;
}
bool DexFileVerifier::CheckInterClassDefItem() {
- const DexFile::ClassDef* item = reinterpret_cast<const DexFile::ClassDef*>(ptr_);
+ const dex::ClassDef* item = reinterpret_cast<const dex::ClassDef*>(ptr_);
// Check that class_idx_ is representable as a uint16_t;
if (UNLIKELY(!IsValidTypeId(item->class_idx_.index_, item->pad1_))) {
@@ -2452,7 +2456,7 @@
// Check that a class is defined after its super class (if the
// latter is defined in the same Dex file).
- const DexFile::ClassDef* superclass_def = dex_file_->FindClassDef(item->superclass_idx_);
+ const dex::ClassDef* superclass_def = dex_file_->FindClassDef(item->superclass_idx_);
if (superclass_def != nullptr) {
// The superclass is defined in this Dex file.
if (superclass_def > item) {
@@ -2476,7 +2480,7 @@
}
// Check interfaces.
- const DexFile::TypeList* interfaces = dex_file_->GetInterfacesList(*item);
+ const dex::TypeList* interfaces = dex_file_->GetInterfacesList(*item);
if (interfaces != nullptr) {
uint32_t size = interfaces->Size();
for (uint32_t i = 0; i < size; i++) {
@@ -2491,7 +2495,7 @@
// Check that a class is defined after the interfaces it implements
// (if they are defined in the same Dex file).
- const DexFile::ClassDef* interface_def =
+ const dex::ClassDef* interface_def =
dex_file_->FindClassDef(interfaces->GetTypeItem(i).type_idx_);
if (interface_def != nullptr) {
// The interface is defined in this Dex file.
@@ -2567,12 +2571,12 @@
}
}
- ptr_ += sizeof(DexFile::ClassDef);
+ ptr_ += sizeof(dex::ClassDef);
return true;
}
bool DexFileVerifier::CheckInterCallSiteIdItem() {
- const DexFile::CallSiteIdItem* item = reinterpret_cast<const DexFile::CallSiteIdItem*>(ptr_);
+ const dex::CallSiteIdItem* item = reinterpret_cast<const dex::CallSiteIdItem*>(ptr_);
// Check call site referenced by item is in encoded array section.
if (!CheckOffsetToTypeMap(item->data_off_, DexFile::kDexTypeEncodedArrayItem)) {
@@ -2622,12 +2626,12 @@
return false;
}
- ptr_ += sizeof(DexFile::CallSiteIdItem);
+ ptr_ += sizeof(dex::CallSiteIdItem);
return true;
}
bool DexFileVerifier::CheckInterMethodHandleItem() {
- const DexFile::MethodHandleItem* item = reinterpret_cast<const DexFile::MethodHandleItem*>(ptr_);
+ const dex::MethodHandleItem* item = reinterpret_cast<const dex::MethodHandleItem*>(ptr_);
DexFile::MethodHandleType method_handle_type =
static_cast<DexFile::MethodHandleType>(item->method_handle_type_);
@@ -2655,14 +2659,13 @@
}
}
- ptr_ += sizeof(DexFile::MethodHandleItem);
+ ptr_ += sizeof(dex::MethodHandleItem);
return true;
}
bool DexFileVerifier::CheckInterAnnotationSetRefList() {
- const DexFile::AnnotationSetRefList* list =
- reinterpret_cast<const DexFile::AnnotationSetRefList*>(ptr_);
- const DexFile::AnnotationSetRefItem* item = list->list_;
+ const dex::AnnotationSetRefList* list = reinterpret_cast<const dex::AnnotationSetRefList*>(ptr_);
+ const dex::AnnotationSetRefItem* item = list->list_;
uint32_t count = list->size_;
for (; count != 0u; --count) {
@@ -2678,7 +2681,7 @@
}
bool DexFileVerifier::CheckInterAnnotationSetItem() {
- const DexFile::AnnotationSetItem* set = reinterpret_cast<const DexFile::AnnotationSetItem*>(ptr_);
+ const dex::AnnotationSetItem* set = reinterpret_cast<const dex::AnnotationSetItem*>(ptr_);
const uint32_t* offsets = set->entries_;
uint32_t count = set->size_;
uint32_t last_idx = 0;
@@ -2689,8 +2692,8 @@
}
// Get the annotation from the offset and the type index for the annotation.
- const DexFile::AnnotationItem* annotation =
- reinterpret_cast<const DexFile::AnnotationItem*>(begin_ + *offsets);
+ const dex::AnnotationItem* annotation =
+ reinterpret_cast<const dex::AnnotationItem*>(begin_ + *offsets);
const uint8_t* data = annotation->annotation_;
DECODE_UNSIGNED_CHECKED_FROM(data, idx);
@@ -2741,8 +2744,8 @@
}
bool DexFileVerifier::CheckInterAnnotationsDirectoryItem() {
- const DexFile::AnnotationsDirectoryItem* item =
- reinterpret_cast<const DexFile::AnnotationsDirectoryItem*>(ptr_);
+ const dex::AnnotationsDirectoryItem* item =
+ reinterpret_cast<const dex::AnnotationsDirectoryItem*>(ptr_);
bool success;
dex::TypeIndex defining_class = FindFirstAnnotationsDirectoryDefiner(ptr_, &success);
if (!success) {
@@ -2755,8 +2758,8 @@
}
// Field annotations follow immediately after the annotations directory.
- const DexFile::FieldAnnotationsItem* field_item =
- reinterpret_cast<const DexFile::FieldAnnotationsItem*>(item + 1);
+ const dex::FieldAnnotationsItem* field_item =
+ reinterpret_cast<const dex::FieldAnnotationsItem*>(item + 1);
uint32_t field_count = item->fields_size_;
for (uint32_t i = 0; i < field_count; i++) {
LOAD_FIELD(field, field_item->field_idx_, "inter_annotations_directory_item field_id",
@@ -2772,8 +2775,8 @@
}
// Method annotations follow immediately after field annotations.
- const DexFile::MethodAnnotationsItem* method_item =
- reinterpret_cast<const DexFile::MethodAnnotationsItem*>(field_item);
+ const dex::MethodAnnotationsItem* method_item =
+ reinterpret_cast<const dex::MethodAnnotationsItem*>(field_item);
uint32_t method_count = item->methods_size_;
for (uint32_t i = 0; i < method_count; i++) {
LOAD_METHOD(method, method_item->method_idx_, "inter_annotations_directory_item method_id",
@@ -2789,8 +2792,8 @@
}
// Parameter annotations follow immediately after method annotations.
- const DexFile::ParameterAnnotationsItem* parameter_item =
- reinterpret_cast<const DexFile::ParameterAnnotationsItem*>(method_item);
+ const dex::ParameterAnnotationsItem* parameter_item =
+ reinterpret_cast<const dex::ParameterAnnotationsItem*>(method_item);
uint32_t parameter_count = item->parameters_size_;
for (uint32_t i = 0; i < parameter_count; i++) {
LOAD_METHOD(parameter_method, parameter_item->method_idx_,
@@ -2946,8 +2949,8 @@
}
bool DexFileVerifier::CheckInterSection() {
- const DexFile::MapList* map = reinterpret_cast<const DexFile::MapList*>(begin_ + header_->map_off_);
- const DexFile::MapItem* item = map->list_;
+ const dex::MapList* map = reinterpret_cast<const dex::MapList*>(begin_ + header_->map_off_);
+ const dex::MapItem* item = map->list_;
uint32_t count = map->size_;
// Cross check the items listed in the map.
@@ -3056,9 +3059,8 @@
return "(error)";
}
- const DexFile::StringId* string_id =
- reinterpret_cast<const DexFile::StringId*>(begin + header->string_ids_off_)
- + string_idx.index_;
+ const dex::StringId* string_id =
+ reinterpret_cast<const dex::StringId*>(begin + header->string_ids_off_) + string_idx.index_;
// Assume that the data is OK at this point. String data has been checked at this point.
@@ -3079,8 +3081,8 @@
// a valid defining class.
CHECK_LT(class_idx.index_, header->type_ids_size_);
- const DexFile::TypeId* type_id =
- reinterpret_cast<const DexFile::TypeId*>(begin + header->type_ids_off_) + class_idx.index_;
+ const dex::TypeId* type_id =
+ reinterpret_cast<const dex::TypeId*>(begin + header->type_ids_off_) + class_idx.index_;
// Assume that the data is OK at this point. Type id offsets have been checked at this point.
@@ -3093,8 +3095,8 @@
// The `idx` has already been checked in `DexFileVerifier::CheckClassDataItemField()`.
CHECK_LT(idx, header->field_ids_size_);
- const DexFile::FieldId* field_id =
- reinterpret_cast<const DexFile::FieldId*>(begin + header->field_ids_off_) + idx;
+ const dex::FieldId* field_id =
+ reinterpret_cast<const dex::FieldId*>(begin + header->field_ids_off_) + idx;
// Assume that the data is OK at this point. Field id offsets have been checked at this point.
@@ -3110,8 +3112,8 @@
// The `idx` has already been checked in `DexFileVerifier::CheckClassDataItemMethod()`.
CHECK_LT(idx, header->method_ids_size_);
- const DexFile::MethodId* method_id =
- reinterpret_cast<const DexFile::MethodId*>(begin + header->method_ids_off_) + idx;
+ const dex::MethodId* method_id =
+ reinterpret_cast<const dex::MethodId*>(begin + header->method_ids_off_) + idx;
// Assume that the data is OK at this point. Method id offsets have been checked at this point.
@@ -3202,16 +3204,16 @@
void DexFileVerifier::FindStringRangesForMethodNames() {
// Use DexFile::StringId* as RandomAccessIterator.
- const DexFile::StringId* first = reinterpret_cast<const DexFile::StringId*>(
+ const dex::StringId* first = reinterpret_cast<const dex::StringId*>(
begin_ + header_->string_ids_off_);
- const DexFile::StringId* last = first + header_->string_ids_size_;
+ const dex::StringId* last = first + header_->string_ids_size_;
- auto get_string = [begin = begin_](const DexFile::StringId& id) {
+ auto get_string = [begin = begin_](const dex::StringId& id) {
const uint8_t* str_data_ptr = begin + id.string_data_off_;
DecodeUnsignedLeb128(&str_data_ptr);
return reinterpret_cast<const char*>(str_data_ptr);
};
- auto compare = [&get_string](const DexFile::StringId& lhs, const char* rhs) {
+ auto compare = [&get_string](const dex::StringId& lhs, const char* rhs) {
return CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(get_string(lhs), rhs) < 0;
};
@@ -3451,8 +3453,8 @@
constructor_flags == (kAccConstructor | kAccStatic));
// Check signature matches expectations.
- const DexFile::MethodId* const method_id = CheckLoadMethodId(method_index,
- "Bad <init>/<clinit> method id");
+ const dex::MethodId* const method_id = CheckLoadMethodId(method_index,
+ "Bad <init>/<clinit> method id");
if (method_id == nullptr) {
return false;
}
@@ -3462,8 +3464,8 @@
// TODO(oth): the error message here is to satisfy the MethodId test
// in the DexFileVerifierTest. The test is checking that the error
// contains this string if the index is out of range.
- const DexFile::ProtoId* const proto_id = CheckLoadProtoId(method_id->proto_idx_,
- "inter_method_id_item proto_idx");
+ const dex::ProtoId* const proto_id = CheckLoadProtoId(method_id->proto_idx_,
+ "inter_method_id_item proto_idx");
if (proto_id == nullptr) {
return false;
}
diff --git a/libdexfile/dex/dex_file_verifier.h b/libdexfile/dex/dex_file_verifier.h
index a81df48..b51a417 100644
--- a/libdexfile/dex/dex_file_verifier.h
+++ b/libdexfile/dex/dex_file_verifier.h
@@ -79,7 +79,7 @@
bool CheckMap();
uint32_t ReadUnsignedLittleEndian(uint32_t size);
- bool CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_item,
+ bool CheckAndGetHandlerOffsets(const dex::CodeItem* code_item,
uint32_t* handler_offsets, uint32_t handlers_size);
bool CheckClassDataItemField(uint32_t idx,
uint32_t access_flags,
@@ -95,7 +95,7 @@
size_t* remaining_directs);
ALWAYS_INLINE
bool CheckOrder(const char* type_descr, uint32_t curr_index, uint32_t prev_index);
- bool CheckStaticFieldTypes(const DexFile::ClassDef* class_def);
+ bool CheckStaticFieldTypes(const dex::ClassDef* class_def);
bool CheckPadding(size_t offset, uint32_t aligned_offset, DexFile::MapItemType type);
bool CheckEncodedValue();
@@ -110,7 +110,7 @@
ClassAccessor::Field* field,
bool* have_class,
dex::TypeIndex* class_type_index,
- const DexFile::ClassDef** class_def);
+ const dex::ClassDef** class_def);
// Check all methods of the given type from the given iterator. Load the class data from the first
// method, if necessary (and return it), or use the given values.
bool CheckIntraClassDataItemMethods(ClassAccessor::Method* method,
@@ -119,7 +119,7 @@
size_t num_directs,
bool* have_class,
dex::TypeIndex* class_type_index,
- const DexFile::ClassDef** class_def);
+ const dex::ClassDef** class_def);
bool CheckIntraCodeItem();
bool CheckIntraStringDataItem();
@@ -166,9 +166,9 @@
// Load a field/method/proto Id by index. Checks whether the index is in bounds, printing the
// error if not. If there is an error, null is returned.
- const DexFile::FieldId* CheckLoadFieldId(uint32_t idx, const char* error_fmt);
- const DexFile::MethodId* CheckLoadMethodId(uint32_t idx, const char* error_fmt);
- const DexFile::ProtoId* CheckLoadProtoId(dex::ProtoIndex idx, const char* error_fmt);
+ const dex::FieldId* CheckLoadFieldId(uint32_t idx, const char* error_fmt);
+ const dex::MethodId* CheckLoadMethodId(uint32_t idx, const char* error_fmt);
+ const dex::ProtoId* CheckLoadProtoId(dex::ProtoIndex idx, const char* error_fmt);
void ErrorStringPrintf(const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3))) COLD_ATTR;
@@ -182,7 +182,7 @@
bool FindClassIndexAndDef(uint32_t index,
bool is_field,
dex::TypeIndex* class_type_index,
- const DexFile::ClassDef** output_class_def);
+ const dex::ClassDef** output_class_def);
// Check validity of the given access flags, interpreted for a field in the context of a class
// with the given second access flags.
@@ -247,7 +247,7 @@
std::string failure_reason_;
// Set of type ids for which there are ClassDef elements in the dex file.
- std::unordered_set<decltype(DexFile::ClassDef::class_idx_)> defined_classes_;
+ std::unordered_set<decltype(dex::ClassDef::class_idx_)> defined_classes_;
// Cached string indices for "interesting" entries wrt/ method names. Will be populated by
// FindStringRangesForMethodNames (which is automatically called before verifying the
diff --git a/libdexfile/dex/dex_file_verifier_test.cc b/libdexfile/dex/dex_file_verifier_test.cc
index c3180f0..b2cff4f 100644
--- a/libdexfile/dex/dex_file_verifier_test.cc
+++ b/libdexfile/dex/dex_file_verifier_test.cc
@@ -153,7 +153,7 @@
kGoodTestDex,
"method_id_class_idx",
[](DexFile* dex_file) {
- DexFile::MethodId* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(0));
+ dex::MethodId* method_id = const_cast<dex::MethodId*>(&dex_file->GetMethodId(0));
method_id->class_idx_ = dex::TypeIndex(0xFF);
},
"could not find declaring class for direct method index 0");
@@ -163,7 +163,7 @@
kGoodTestDex,
"method_id_proto_idx",
[](DexFile* dex_file) {
- DexFile::MethodId* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(0));
+ dex::MethodId* method_id = const_cast<dex::MethodId*>(&dex_file->GetMethodId(0));
method_id->proto_idx_ = dex::ProtoIndex(0xFF);
},
"inter_method_id_item proto_idx");
@@ -173,7 +173,7 @@
kGoodTestDex,
"method_id_name_idx",
[](DexFile* dex_file) {
- DexFile::MethodId* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(0));
+ dex::MethodId* method_id = const_cast<dex::MethodId*>(&dex_file->GetMethodId(0));
method_id->name_idx_ = dex::StringIndex(0xFF);
},
"Bad index for method flags verification");
@@ -244,7 +244,7 @@
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
uint32_t method_index = method.GetIndex();
dex::StringIndex name_index = dex_file->GetMethodId(method_index).name_idx_;
- const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
+ const dex::StringId& string_id = dex_file->GetStringId(name_index);
const char* str = dex_file->GetStringData(string_id);
if (strcmp(name, str) == 0) {
if (method_idx != nullptr) {
@@ -837,7 +837,7 @@
for (const ClassAccessor::Field& field : accessor.GetFields()) {
uint32_t field_index = field.GetIndex();
dex::StringIndex name_index = dex_file->GetFieldId(field_index).name_idx_;
- const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
+ const dex::StringId& string_id = dex_file->GetStringId(name_index);
const char* str = dex_file->GetStringData(string_id);
if (strcmp(name, str) == 0) {
// Go to the back of the access flags.
@@ -1415,9 +1415,9 @@
dex_file->GetMethodId(method_idx + 1).proto_idx_.index_);
// Their return types should be the same.
dex::ProtoIndex proto1_idx = dex_file->GetMethodId(method_idx).proto_idx_;
- const DexFile::ProtoId& proto1 = dex_file->GetProtoId(proto1_idx);
+ const dex::ProtoId& proto1 = dex_file->GetProtoId(proto1_idx);
dex::ProtoIndex proto2_idx(proto1_idx.index_ + 1u);
- const DexFile::ProtoId& proto2 = dex_file->GetProtoId(proto2_idx);
+ const dex::ProtoId& proto2 = dex_file->GetProtoId(proto2_idx);
CHECK_EQ(proto1.return_type_idx_, proto2.return_type_idx_);
// And the first should not have any parameters while the second should have some.
CHECK(!DexFileParameterIterator(*dex_file, proto1).HasNext());
diff --git a/libdexfile/dex/method_reference.h b/libdexfile/dex/method_reference.h
index 266582b..f66ac30 100644
--- a/libdexfile/dex/method_reference.h
+++ b/libdexfile/dex/method_reference.h
@@ -31,7 +31,7 @@
std::string PrettyMethod(bool with_signature = true) const {
return dex_file->PrettyMethod(index, with_signature);
}
- const DexFile::MethodId& GetMethodId() const {
+ const dex::MethodId& GetMethodId() const {
return dex_file->GetMethodId(index);
}
};
@@ -50,8 +50,8 @@
bool SlowCompare(MethodReference mr1, MethodReference mr2) const {
// The order is the same as for method ids in a single dex file.
// Compare the class descriptors first.
- const DexFile::MethodId& mid1 = mr1.GetMethodId();
- const DexFile::MethodId& mid2 = mr2.GetMethodId();
+ const dex::MethodId& mid1 = mr1.GetMethodId();
+ const dex::MethodId& mid2 = mr2.GetMethodId();
int descriptor_diff = strcmp(mr1.dex_file->StringByTypeIdx(mid1.class_idx_),
mr2.dex_file->StringByTypeIdx(mid2.class_idx_));
if (descriptor_diff != 0) {
@@ -63,17 +63,17 @@
return name_diff < 0;
}
// And then compare proto ids, starting with return type comparison.
- const DexFile::ProtoId& prid1 = mr1.dex_file->GetProtoId(mid1.proto_idx_);
- const DexFile::ProtoId& prid2 = mr2.dex_file->GetProtoId(mid2.proto_idx_);
+ const dex::ProtoId& prid1 = mr1.dex_file->GetProtoId(mid1.proto_idx_);
+ const dex::ProtoId& prid2 = mr2.dex_file->GetProtoId(mid2.proto_idx_);
int return_type_diff = strcmp(mr1.dex_file->StringByTypeIdx(prid1.return_type_idx_),
mr2.dex_file->StringByTypeIdx(prid2.return_type_idx_));
if (return_type_diff != 0) {
return return_type_diff < 0;
}
// And finishing with lexicographical parameter comparison.
- const DexFile::TypeList* params1 = mr1.dex_file->GetProtoParameters(prid1);
+ const dex::TypeList* params1 = mr1.dex_file->GetProtoParameters(prid1);
size_t param1_size = (params1 != nullptr) ? params1->Size() : 0u;
- const DexFile::TypeList* params2 = mr2.dex_file->GetProtoParameters(prid2);
+ const dex::TypeList* params2 = mr2.dex_file->GetProtoParameters(prid2);
size_t param2_size = (params2 != nullptr) ? params2->Size() : 0u;
for (size_t i = 0, num = std::min(param1_size, param2_size); i != num; ++i) {
int param_diff = strcmp(mr1.dex_file->StringByTypeIdx(params1->GetTypeItem(i).type_idx_),
diff --git a/libdexfile/dex/signature-inl.h b/libdexfile/dex/signature-inl.h
new file mode 100644
index 0000000..ccc7ea9
--- /dev/null
+++ b/libdexfile/dex/signature-inl.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBDEXFILE_DEX_SIGNATURE_INL_H_
+#define ART_LIBDEXFILE_DEX_SIGNATURE_INL_H_
+
+#include "signature.h"
+
+#include "base/stringpiece.h"
+#include "dex_file-inl.h"
+
+namespace art {
+
+inline bool Signature::operator==(const Signature& rhs) const {
+ if (dex_file_ == nullptr) {
+ return rhs.dex_file_ == nullptr;
+ }
+ if (rhs.dex_file_ == nullptr) {
+ return false;
+ }
+ if (dex_file_ == rhs.dex_file_) {
+ return proto_id_ == rhs.proto_id_;
+ }
+ uint32_t lhs_shorty_len; // For a shorty utf16 length == mutf8 length.
+ const char* lhs_shorty_data = dex_file_->StringDataAndUtf16LengthByIdx(proto_id_->shorty_idx_,
+ &lhs_shorty_len);
+ StringPiece lhs_shorty(lhs_shorty_data, lhs_shorty_len);
+ {
+ uint32_t rhs_shorty_len;
+ const char* rhs_shorty_data =
+ rhs.dex_file_->StringDataAndUtf16LengthByIdx(rhs.proto_id_->shorty_idx_,
+ &rhs_shorty_len);
+ StringPiece rhs_shorty(rhs_shorty_data, rhs_shorty_len);
+ if (lhs_shorty != rhs_shorty) {
+ return false; // Shorty mismatch.
+ }
+ }
+ if (lhs_shorty[0] == 'L') {
+ const dex::TypeId& return_type_id = dex_file_->GetTypeId(proto_id_->return_type_idx_);
+ const dex::TypeId& rhs_return_type_id =
+ rhs.dex_file_->GetTypeId(rhs.proto_id_->return_type_idx_);
+ if (!DexFile::StringEquals(dex_file_, return_type_id.descriptor_idx_,
+ rhs.dex_file_, rhs_return_type_id.descriptor_idx_)) {
+ return false; // Return type mismatch.
+ }
+ }
+ if (lhs_shorty.find('L', 1) != StringPiece::npos) {
+ const dex::TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
+ const dex::TypeList* rhs_params = rhs.dex_file_->GetProtoParameters(*rhs.proto_id_);
+ // We found a reference parameter in the matching shorty, so both lists must be non-empty.
+ DCHECK(params != nullptr);
+ DCHECK(rhs_params != nullptr);
+ uint32_t params_size = params->Size();
+ DCHECK_EQ(params_size, rhs_params->Size()); // Parameter list size must match.
+ for (uint32_t i = 0; i < params_size; ++i) {
+ const dex::TypeId& param_id = dex_file_->GetTypeId(params->GetTypeItem(i).type_idx_);
+ const dex::TypeId& rhs_param_id =
+ rhs.dex_file_->GetTypeId(rhs_params->GetTypeItem(i).type_idx_);
+ if (!DexFile::StringEquals(dex_file_, param_id.descriptor_idx_,
+ rhs.dex_file_, rhs_param_id.descriptor_idx_)) {
+ return false; // Parameter type mismatch.
+ }
+ }
+ }
+ return true;
+}
+
+} // namespace art
+
+#endif // ART_LIBDEXFILE_DEX_SIGNATURE_INL_H_
diff --git a/libdexfile/dex/signature.cc b/libdexfile/dex/signature.cc
new file mode 100644
index 0000000..34b4b55
--- /dev/null
+++ b/libdexfile/dex/signature.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "signature-inl.h"
+
+#include <string.h>
+
+#include <ostream>
+#include <type_traits>
+
+namespace art {
+
+using dex::TypeList;
+
+std::string Signature::ToString() const {
+ if (dex_file_ == nullptr) {
+ CHECK(proto_id_ == nullptr);
+ return "<no signature>";
+ }
+ const TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
+ std::string result;
+ if (params == nullptr) {
+ result += "()";
+ } else {
+ result += "(";
+ for (uint32_t i = 0; i < params->Size(); ++i) {
+ result += dex_file_->StringByTypeIdx(params->GetTypeItem(i).type_idx_);
+ }
+ result += ")";
+ }
+ result += dex_file_->StringByTypeIdx(proto_id_->return_type_idx_);
+ return result;
+}
+
+uint32_t Signature::GetNumberOfParameters() const {
+ const TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
+ return (params != nullptr) ? params->Size() : 0;
+}
+
+bool Signature::IsVoid() const {
+ const char* return_type = dex_file_->GetReturnTypeDescriptor(*proto_id_);
+ return strcmp(return_type, "V") == 0;
+}
+
+bool Signature::operator==(const StringPiece& rhs) const {
+ if (dex_file_ == nullptr) {
+ return false;
+ }
+ StringPiece tail(rhs);
+ if (!tail.starts_with("(")) {
+ return false; // Invalid signature
+ }
+ tail.remove_prefix(1); // "(";
+ const TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
+ if (params != nullptr) {
+ for (uint32_t i = 0; i < params->Size(); ++i) {
+ StringPiece param(dex_file_->StringByTypeIdx(params->GetTypeItem(i).type_idx_));
+ if (!tail.starts_with(param)) {
+ return false;
+ }
+ tail.remove_prefix(param.length());
+ }
+ }
+ if (!tail.starts_with(")")) {
+ return false;
+ }
+ tail.remove_prefix(1); // ")";
+ return tail == dex_file_->StringByTypeIdx(proto_id_->return_type_idx_);
+}
+
+std::ostream& operator<<(std::ostream& os, const Signature& sig) {
+ return os << sig.ToString();
+}
+
+} // namespace art
diff --git a/libdexfile/dex/signature.h b/libdexfile/dex/signature.h
new file mode 100644
index 0000000..235f37c
--- /dev/null
+++ b/libdexfile/dex/signature.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBDEXFILE_DEX_SIGNATURE_H_
+#define ART_LIBDEXFILE_DEX_SIGNATURE_H_
+
+#include <iosfwd>
+#include <string>
+
+#include <android-base/logging.h>
+
+#include "base/value_object.h"
+
+namespace art {
+
+namespace dex {
+struct ProtoId;
+} // namespace dex
+class DexFile;
+class StringPiece;
+
+// Abstract the signature of a method.
+class Signature : public ValueObject {
+ public:
+ std::string ToString() const;
+
+ static Signature NoSignature() {
+ return Signature();
+ }
+
+ bool IsVoid() const;
+ uint32_t GetNumberOfParameters() const;
+
+ bool operator==(const Signature& rhs) const;
+ bool operator!=(const Signature& rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator==(const StringPiece& rhs) const;
+
+ private:
+ Signature(const DexFile* dex, const dex::ProtoId& proto) : dex_file_(dex), proto_id_(&proto) {
+ }
+
+ Signature() = default;
+
+ friend class DexFile;
+
+ const DexFile* const dex_file_ = nullptr;
+ const dex::ProtoId* const proto_id_ = nullptr;
+};
+std::ostream& operator<<(std::ostream& os, const Signature& sig);
+
+} // namespace art
+
+#endif // ART_LIBDEXFILE_DEX_SIGNATURE_H_
diff --git a/libdexfile/dex/standard_dex_file.cc b/libdexfile/dex/standard_dex_file.cc
index 40dcafd..8bac44e 100644
--- a/libdexfile/dex/standard_dex_file.cc
+++ b/libdexfile/dex/standard_dex_file.cc
@@ -72,7 +72,7 @@
return GetDexVersion() >= DexFile::kDefaultMethodsVersion;
}
-uint32_t StandardDexFile::GetCodeItemSize(const DexFile::CodeItem& item) const {
+uint32_t StandardDexFile::GetCodeItemSize(const dex::CodeItem& item) const {
DCHECK(IsInDataSection(&item));
return reinterpret_cast<uintptr_t>(CodeItemDataAccessor(*this, &item).CodeItemDataEnd()) -
reinterpret_cast<uintptr_t>(&item);
diff --git a/libdexfile/dex/standard_dex_file.h b/libdexfile/dex/standard_dex_file.h
index fd7e78f..838d4e3 100644
--- a/libdexfile/dex/standard_dex_file.h
+++ b/libdexfile/dex/standard_dex_file.h
@@ -32,7 +32,7 @@
// Same for now.
};
- struct CodeItem : public DexFile::CodeItem {
+ struct CodeItem : public dex::CodeItem {
static constexpr size_t kAlignment = 4;
private:
@@ -81,7 +81,7 @@
bool SupportsDefaultMethods() const override;
- uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const override;
+ uint32_t GetCodeItemSize(const dex::CodeItem& item) const override;
size_t GetDequickenedSize() const override {
return Size();
diff --git a/libdexfile/dex/test_dex_file_builder.h b/libdexfile/dex/test_dex_file_builder.h
index 072aafb..2b0bad0 100644
--- a/libdexfile/dex/test_dex_file_builder.h
+++ b/libdexfile/dex/test_dex_file_builder.h
@@ -112,7 +112,7 @@
header->string_ids_size_ = strings_.size();
header->string_ids_off_ = strings_.empty() ? 0u : string_ids_offset;
- uint32_t type_ids_offset = string_ids_offset + strings_.size() * sizeof(DexFile::StringId);
+ uint32_t type_ids_offset = string_ids_offset + strings_.size() * sizeof(dex::StringId);
uint32_t type_idx = 0u;
for (auto& entry : types_) {
entry.second = type_idx;
@@ -121,7 +121,7 @@
header->type_ids_size_ = types_.size();
header->type_ids_off_ = types_.empty() ? 0u : type_ids_offset;
- uint32_t proto_ids_offset = type_ids_offset + types_.size() * sizeof(DexFile::TypeId);
+ uint32_t proto_ids_offset = type_ids_offset + types_.size() * sizeof(dex::TypeId);
uint32_t proto_idx = 0u;
for (auto& entry : protos_) {
entry.second.idx = proto_idx;
@@ -129,7 +129,7 @@
size_t num_args = entry.first.args.size();
if (num_args != 0u) {
entry.second.data_offset = RoundUp(data_section_size, 4u);
- data_section_size = entry.second.data_offset + 4u + num_args * sizeof(DexFile::TypeItem);
+ data_section_size = entry.second.data_offset + 4u + num_args * sizeof(dex::TypeItem);
} else {
entry.second.data_offset = 0u;
}
@@ -137,7 +137,7 @@
header->proto_ids_size_ = protos_.size();
header->proto_ids_off_ = protos_.empty() ? 0u : proto_ids_offset;
- uint32_t field_ids_offset = proto_ids_offset + protos_.size() * sizeof(DexFile::ProtoId);
+ uint32_t field_ids_offset = proto_ids_offset + protos_.size() * sizeof(dex::ProtoId);
uint32_t field_idx = 0u;
for (auto& entry : fields_) {
entry.second = field_idx;
@@ -146,7 +146,7 @@
header->field_ids_size_ = fields_.size();
header->field_ids_off_ = fields_.empty() ? 0u : field_ids_offset;
- uint32_t method_ids_offset = field_ids_offset + fields_.size() * sizeof(DexFile::FieldId);
+ uint32_t method_ids_offset = field_ids_offset + fields_.size() * sizeof(dex::FieldId);
uint32_t method_idx = 0u;
for (auto& entry : methods_) {
entry.second = method_idx;
@@ -159,7 +159,7 @@
header->class_defs_size_ = 0u;
header->class_defs_off_ = 0u;
- uint32_t data_section_offset = method_ids_offset + methods_.size() * sizeof(DexFile::MethodId);
+ uint32_t data_section_offset = method_ids_offset + methods_.size() * sizeof(dex::MethodId);
header->data_size_ = data_section_size;
header->data_off_ = (data_section_size != 0u) ? data_section_offset : 0u;
@@ -172,11 +172,11 @@
uint32_t raw_offset = data_section_offset + entry.second.data_offset;
dex_file_data_[raw_offset] = static_cast<uint8_t>(entry.first.size());
std::memcpy(&dex_file_data_[raw_offset + 1], entry.first.c_str(), entry.first.size() + 1);
- Write32(string_ids_offset + entry.second.idx * sizeof(DexFile::StringId), raw_offset);
+ Write32(string_ids_offset + entry.second.idx * sizeof(dex::StringId), raw_offset);
}
for (const auto& entry : types_) {
- Write32(type_ids_offset + entry.second * sizeof(DexFile::TypeId), GetStringIdx(entry.first));
+ Write32(type_ids_offset + entry.second * sizeof(dex::TypeId), GetStringIdx(entry.first));
++type_idx;
}
@@ -184,7 +184,7 @@
size_t num_args = entry.first.args.size();
uint32_t type_list_offset =
(num_args != 0u) ? data_section_offset + entry.second.data_offset : 0u;
- uint32_t raw_offset = proto_ids_offset + entry.second.idx * sizeof(DexFile::ProtoId);
+ uint32_t raw_offset = proto_ids_offset + entry.second.idx * sizeof(dex::ProtoId);
Write32(raw_offset + 0u, GetStringIdx(entry.first.shorty));
Write16(raw_offset + 4u, GetTypeIdx(entry.first.return_type));
Write32(raw_offset + 8u, type_list_offset);
@@ -192,21 +192,21 @@
CHECK_NE(entry.second.data_offset, 0u);
Write32(type_list_offset, num_args);
for (size_t i = 0; i != num_args; ++i) {
- Write16(type_list_offset + 4u + i * sizeof(DexFile::TypeItem),
+ Write16(type_list_offset + 4u + i * sizeof(dex::TypeItem),
GetTypeIdx(entry.first.args[i]));
}
}
}
for (const auto& entry : fields_) {
- uint32_t raw_offset = field_ids_offset + entry.second * sizeof(DexFile::FieldId);
+ uint32_t raw_offset = field_ids_offset + entry.second * sizeof(dex::FieldId);
Write16(raw_offset + 0u, GetTypeIdx(entry.first.class_descriptor));
Write16(raw_offset + 2u, GetTypeIdx(entry.first.type));
Write32(raw_offset + 4u, GetStringIdx(entry.first.name));
}
for (const auto& entry : methods_) {
- uint32_t raw_offset = method_ids_offset + entry.second * sizeof(DexFile::MethodId);
+ uint32_t raw_offset = method_ids_offset + entry.second * sizeof(dex::MethodId);
Write16(raw_offset + 0u, GetTypeIdx(entry.first.class_descriptor));
auto it = protos_.find(*entry.first.proto);
CHECK(it != protos_.end());
diff --git a/libdexfile/dex/type_lookup_table.cc b/libdexfile/dex/type_lookup_table.cc
index 7d80a2e..c46b488 100644
--- a/libdexfile/dex/type_lookup_table.cc
+++ b/libdexfile/dex/type_lookup_table.cc
@@ -47,9 +47,9 @@
// occupied then delay the insertion of the element to the second stage to reduce probing
// distance.
for (size_t class_def_idx = 0; class_def_idx < dex_file.NumClassDefs(); ++class_def_idx) {
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
- const DexFile::TypeId& type_id = dex_file.GetTypeId(class_def.class_idx_);
- const DexFile::StringId& str_id = dex_file.GetStringId(type_id.descriptor_idx_);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
+ const dex::TypeId& type_id = dex_file.GetTypeId(class_def.class_idx_);
+ const dex::StringId& str_id = dex_file.GetStringId(type_id.descriptor_idx_);
const uint32_t hash = ComputeModifiedUtf8Hash(dex_file.GetStringData(str_id));
const uint32_t pos = hash & mask;
if (entries[pos].IsEmpty()) {
@@ -62,9 +62,9 @@
// The second stage. The initial position of these elements had a collision. Put these elements
// into the nearest free cells and link them together by updating next_pos_delta.
for (uint16_t class_def_idx : conflict_class_defs) {
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
- const DexFile::TypeId& type_id = dex_file.GetTypeId(class_def.class_idx_);
- const DexFile::StringId& str_id = dex_file.GetStringId(type_id.descriptor_idx_);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
+ const dex::TypeId& type_id = dex_file.GetTypeId(class_def.class_idx_);
+ const dex::StringId& str_id = dex_file.GetStringId(type_id.descriptor_idx_);
const uint32_t hash = ComputeModifiedUtf8Hash(dex_file.GetStringData(str_id));
// Find the last entry in the chain.
uint32_t tail_pos = hash & mask;
diff --git a/libdexfile/external/include/art_api/ext_dex_file.h b/libdexfile/external/include/art_api/ext_dex_file.h
index 5f64ab1..4a52a2b 100644
--- a/libdexfile/external/include/art_api/ext_dex_file.h
+++ b/libdexfile/external/include/art_api/ext_dex_file.h
@@ -98,11 +98,11 @@
// Minimal std::string look-alike for a string returned from libdexfile.
class DexString final {
public:
- DexString(DexString&& dex_str) { ReplaceExtString(std::move(dex_str)); }
+ DexString(DexString&& dex_str) noexcept { ReplaceExtString(std::move(dex_str)); }
explicit DexString(const char* str = "") : ext_string_(ExtDexFileMakeString(str)) {}
~DexString() { ExtDexFileFreeString(ext_string_); }
- DexString& operator=(DexString&& dex_str) {
+ DexString& operator=(DexString&& dex_str) noexcept {
ReplaceExtString(std::move(dex_str));
return *this;
}
@@ -163,7 +163,7 @@
// thread-safe.
class DexFile {
public:
- DexFile(DexFile&& dex_file) {
+ DexFile(DexFile&& dex_file) noexcept {
ext_dex_file_ = dex_file.ext_dex_file_;
dex_file.ext_dex_file_ = nullptr;
}
diff --git a/libprofile/profile/profile_compilation_info.cc b/libprofile/profile/profile_compilation_info.cc
index 02f6344..8b8569d 100644
--- a/libprofile/profile/profile_compilation_info.cc
+++ b/libprofile/profile/profile_compilation_info.cc
@@ -58,6 +58,12 @@
// profile_compilation_info object. All the profile line headers are now placed together
// before corresponding method_encodings and class_ids.
const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '1', '0', '\0' };
+const uint8_t ProfileCompilationInfo::kProfileVersionWithCounters[] = { '5', '0', '0', '\0' };
+
+static_assert(sizeof(ProfileCompilationInfo::kProfileVersion) == 4,
+ "Invalid profile version size");
+static_assert(sizeof(ProfileCompilationInfo::kProfileVersionWithCounters) == 4,
+ "Invalid profile version size");
// The name of the profile entry in the dex metadata file.
// DO NOT CHANGE THIS! (it's similar to classes.dex in the apk files).
@@ -84,18 +90,31 @@
return kDebugIgnoreChecksum || dex_file_checksum == checksum;
}
+// For storage efficiency we store aggregation counts of up to at most 2^16.
+static uint16_t IncrementAggregationCounter(uint16_t counter, uint16_t value) {
+ if (counter < (std::numeric_limits<uint16_t>::max() - value)) {
+ return counter + value;
+ } else {
+ return std::numeric_limits<uint16_t>::max();
+ }
+}
+
ProfileCompilationInfo::ProfileCompilationInfo(ArenaPool* custom_arena_pool)
: default_arena_pool_(),
allocator_(custom_arena_pool),
info_(allocator_.Adapter(kArenaAllocProfile)),
- profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)) {
+ profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)),
+ aggregation_count_(0) {
+ InitProfileVersionInternal(kProfileVersion);
}
ProfileCompilationInfo::ProfileCompilationInfo()
: default_arena_pool_(),
allocator_(&default_arena_pool_),
info_(allocator_.Adapter(kArenaAllocProfile)),
- profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)) {
+ profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)),
+ aggregation_count_(0) {
+ InitProfileVersionInternal(kProfileVersion);
}
ProfileCompilationInfo::~ProfileCompilationInfo() {
@@ -326,13 +345,15 @@
/**
* Serialization format:
* [profile_header, zipped[[profile_line_header1, profile_line_header2...],[profile_line_data1,
- * profile_line_data2...]]]
+ * profile_line_data2...]],global_aggregation_counter]
* profile_header:
* magic,version,number_of_dex_files,uncompressed_size_of_zipped_data,compressed_data_size
* profile_line_header:
* dex_location,number_of_classes,methods_region_size,dex_location_checksum,num_method_ids
* profile_line_data:
- * method_encoding_1,method_encoding_2...,class_id1,class_id2...,startup/post startup bitmap
+ * method_encoding_1,method_encoding_2...,class_id1,class_id2...,startup/post startup bitmap,
+ * num_classes,class_counters,num_methods,method_counters
+ * The aggregation counters are only stored if the profile version is kProfileVersionWithCounters.
* The method_encoding is:
* method_id,number_of_inline_caches,inline_cache1,inline_cache2...
* The inline_cache is:
@@ -355,7 +376,7 @@
if (!WriteBuffer(fd, kProfileMagic, sizeof(kProfileMagic))) {
return false;
}
- if (!WriteBuffer(fd, kProfileVersion, sizeof(kProfileVersion))) {
+ if (!WriteBuffer(fd, version_, sizeof(version_))) {
return false;
}
DCHECK_LE(info_.size(), std::numeric_limits<uint8_t>::max());
@@ -370,7 +391,17 @@
sizeof(uint16_t) * dex_data.class_set.size() +
methods_region_size +
dex_data.bitmap_storage.size();
+ if (StoresAggregationCounters()) {
+ required_capacity += sizeof(uint16_t) + // num class counters
+ sizeof(uint16_t) * dex_data.class_set.size() +
+ sizeof(uint16_t) + // num method counter
+ sizeof(uint16_t) * dex_data_ptr->GetNumMethodCounters();
+ }
}
+ if (StoresAggregationCounters()) {
+ required_capacity += sizeof(uint16_t); // global counter
+ }
+
// Allow large profiles for non target builds for the case where we are merging many profiles
// to generate a boot image profile.
if (kIsTargetBuild && required_capacity > kProfileSizeErrorThresholdInBytes) {
@@ -443,6 +474,24 @@
buffer.insert(buffer.end(),
dex_data.bitmap_storage.begin(),
dex_data.bitmap_storage.end());
+
+ if (StoresAggregationCounters()) {
+ AddUintToBuffer(&buffer, static_cast<uint16_t>(dex_data.class_set.size()));
+ for (const auto& class_id : dex_data.class_set) {
+ uint16_t type_idx = class_id.index_;
+ AddUintToBuffer(&buffer, dex_data.class_counters[type_idx]);
+ }
+ AddUintToBuffer(&buffer, dex_data.GetNumMethodCounters());
+ for (uint16_t method_idx = 0; method_idx < dex_data.num_method_ids; method_idx++) {
+ if (dex_data.GetHotnessInfo(method_idx).IsInProfile()) {
+ AddUintToBuffer(&buffer, dex_data.method_counters[method_idx]);
+ }
+ }
+ }
+ }
+
+ if (StoresAggregationCounters()) {
+ AddUintToBuffer(&buffer, aggregation_count_);
}
uint32_t output_size = 0;
@@ -583,7 +632,8 @@
profile_key,
checksum,
profile_index,
- num_method_ids);
+ num_method_ids,
+ StoresAggregationCounters());
info_.push_back(dex_file_data);
}
DexFileData* result = info_[profile_index];
@@ -943,7 +993,7 @@
// Read magic and version
const size_t kMagicVersionSize =
sizeof(kProfileMagic) +
- sizeof(kProfileVersion) +
+ kProfileVersionSize +
sizeof(uint8_t) + // number of dex files
sizeof(uint32_t) + // size of uncompressed profile data
sizeof(uint32_t); // size of compressed profile data
@@ -959,10 +1009,18 @@
*error = "Profile missing magic";
return kProfileLoadVersionMismatch;
}
- if (!safe_buffer.CompareAndAdvance(kProfileVersion, sizeof(kProfileVersion))) {
+ if (safe_buffer.CountUnreadBytes() < kProfileVersionSize) {
+ *error = "Cannot read profile version";
+ return kProfileLoadBadData;
+ }
+ memcpy(version_, safe_buffer.GetCurrentPtr(), kProfileVersionSize);
+ safe_buffer.Advance(kProfileVersionSize);
+ if ((memcmp(version_, kProfileVersion, kProfileVersionSize) != 0) &&
+ (memcmp(version_, kProfileVersionWithCounters, kProfileVersionSize) != 0)) {
*error = "Profile version mismatch";
return kProfileLoadVersionMismatch;
}
+
if (!safe_buffer.ReadUintAndAdvance<uint8_t>(number_of_dex_files)) {
*error = "Cannot read the number of dex files";
return kProfileLoadBadData;
@@ -1047,6 +1105,7 @@
}
}
+ // Read method bitmap.
const size_t bytes = data->bitmap_storage.size();
if (buffer.CountUnreadBytes() < bytes) {
*error += "Profile EOF reached prematurely for ReadProfileHeaderDexLocation";
@@ -1055,10 +1114,51 @@
const uint8_t* base_ptr = buffer.GetCurrentPtr();
std::copy_n(base_ptr, bytes, data->bitmap_storage.data());
buffer.Advance(bytes);
- // Read method bitmap.
+
+ if (StoresAggregationCounters()) {
+ ReadAggregationCounters(buffer, *data, error);
+ }
+
return kProfileLoadSuccess;
}
+bool ProfileCompilationInfo::ReadAggregationCounters(
+ SafeBuffer& buffer,
+ DexFileData& dex_data,
+ /*out*/std::string* error) {
+ size_t unread_bytes_before_op = buffer.CountUnreadBytes();
+ size_t expected_byte_count = sizeof(uint16_t) *
+ (dex_data.class_set.size() + dex_data.method_map.size() + 2);
+ if (unread_bytes_before_op < expected_byte_count) {
+ *error += "Profile EOF reached prematurely for ReadAggregationCounters";
+ return false;
+ }
+
+ uint16_t num_class_counters;
+ READ_UINT(uint16_t, buffer, num_class_counters, error);
+ if (num_class_counters != dex_data.class_set.size()) {
+ *error = "Invalid class size when reading counters";
+ return false;
+ }
+ for (const auto& class_it : dex_data.class_set) {
+ READ_UINT(uint16_t, buffer, dex_data.class_counters[class_it.index_], error);
+ }
+
+ uint16_t num_method_counters;
+ READ_UINT(uint16_t, buffer, num_method_counters, error);
+ if (num_method_counters != dex_data.GetNumMethodCounters()) {
+ *error = "Invalid method size when reading counters";
+ return false;
+ }
+ for (uint16_t method_idx = 0; method_idx < dex_data.num_method_ids; method_idx++) {
+ if (dex_data.GetHotnessInfo(method_idx).IsInProfile()) {
+ READ_UINT(uint16_t, buffer, dex_data.method_counters[method_idx], error);
+ }
+ }
+
+ return true;
+}
+
// TODO(calin): Fix this API. ProfileCompilationInfo::Load should be static and
// return a unique pointer to a ProfileCompilationInfo upon success.
bool ProfileCompilationInfo::Load(
@@ -1370,9 +1470,17 @@
}
}
+ if (StoresAggregationCounters()) {
+ if (!uncompressed_data.ReadUintAndAdvance<uint16_t>(&aggregation_count_)) {
+ *error = "Cannot read the global aggregation count";
+ return kProfileLoadBadData;
+ }
+ }
+
// Check that we read everything and that profiles don't contain junk data.
if (uncompressed_data.CountUnreadBytes() > 0) {
- *error = "Unexpected content in the profile file";
+ *error = "Unexpected content in the profile file: " +
+ std::to_string(uncompressed_data.CountUnreadBytes()) + " extra bytes";
return kProfileLoadBadData;
} else {
return kProfileLoadSuccess;
@@ -1518,6 +1626,33 @@
other_dex_data->checksum));
DCHECK(dex_data != nullptr);
+ // Merge counters for methods and classes. Must be done before we merge the bitmaps so that
+ // we can tell if the data is new or not.
+ if (StoresAggregationCounters()) {
+ // Class aggregation counters.
+ if (merge_classes) {
+ for (const dex::TypeIndex& type_idx : other_dex_data->class_set) {
+ uint16_t amount = other.StoresAggregationCounters()
+ ? other_dex_data->class_counters[type_idx.index_]
+ : (dex_data->ContainsClass(type_idx) ? 1 : 0);
+
+ dex_data->class_counters[type_idx.index_] =
+ IncrementAggregationCounter(dex_data->class_counters[type_idx.index_], amount);
+ }
+ }
+
+ // Method aggregation counters.
+ for (uint16_t method_idx = 0; method_idx < other_dex_data->num_method_ids; method_idx++) {
+ if (other_dex_data->GetHotnessInfo(method_idx).IsInProfile()) {
+ uint16_t amount = other.StoresAggregationCounters()
+ ? other_dex_data->method_counters[method_idx]
+ : (dex_data->GetHotnessInfo(method_idx).IsInProfile() ? 1 : 0);
+ dex_data->method_counters[method_idx] =
+ IncrementAggregationCounter(dex_data->method_counters[method_idx], amount);
+ }
+ }
+ }
+
// Merge the classes.
if (merge_classes) {
dex_data->class_set.insert(other_dex_data->class_set.begin(),
@@ -1552,6 +1687,13 @@
// Merge the method bitmaps.
dex_data->MergeBitmap(*other_dex_data);
}
+
+ // Global aggregation counter.
+ if (StoresAggregationCounters()) {
+ uint16_t amount = other.StoresAggregationCounters() ? other.aggregation_count_ : 1;
+ aggregation_count_ = IncrementAggregationCounter(aggregation_count_, amount);
+ }
+
return true;
}
@@ -1614,11 +1756,7 @@
bool ProfileCompilationInfo::ContainsClass(const DexFile& dex_file, dex::TypeIndex type_idx) const {
const DexFileData* dex_data = FindDexData(&dex_file);
- if (dex_data != nullptr) {
- const ArenaSet<dex::TypeIndex>& classes = dex_data->class_set;
- return classes.find(type_idx) != classes.end();
- }
- return false;
+ return (dex_data != nullptr) && dex_data->ContainsClass(type_idx);
}
uint32_t ProfileCompilationInfo::GetNumberOfMethods() const {
@@ -1753,6 +1891,9 @@
bool ProfileCompilationInfo::Equals(const ProfileCompilationInfo& other) {
// No need to compare profile_key_map_. That's only a cache for fast search.
// All the information is already in the info_ vector.
+ if (memcmp(version_, other.version_, kProfileVersionSize) != 0) {
+ return false;
+ }
if (info_.size() != other.info_.size()) {
return false;
}
@@ -1763,6 +1904,9 @@
return false;
}
}
+ if (aggregation_count_ != other.aggregation_count_) {
+ return false;
+ }
return true;
}
@@ -1965,9 +2109,8 @@
SetMethodHotness(index, flags);
if ((flags & MethodHotness::kFlagHot) != 0) {
- method_map.FindOrAdd(
- index,
- InlineCacheMap(std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)));
+ ProfileCompilationInfo::InlineCacheMap* result = FindOrAddMethod(index);
+ DCHECK(result != nullptr);
}
return true;
}
@@ -2000,6 +2143,43 @@
return ret;
}
+int32_t ProfileCompilationInfo::DexFileData::GetMethodAggregationCounter(
+ uint16_t method_idx) const {
+ CHECK_GT(method_counters.size(), method_idx) << "Profile not prepared for aggregation counters";
+ if (!GetHotnessInfo(method_idx).IsInProfile()) {
+ return -1;
+ }
+
+ return method_counters[method_idx];
+}
+
+int32_t ProfileCompilationInfo::DexFileData::GetClassAggregationCounter(uint16_t type_idx) const {
+ CHECK_GT(class_counters.size(), type_idx) << "Profile not prepared for aggregation counters";
+ if (!ContainsClass(dex::TypeIndex(type_idx))) {
+ return -1;
+ }
+
+ return class_counters[type_idx];
+}
+
+int32_t ProfileCompilationInfo::GetMethodAggregationCounter(
+ const MethodReference& method_ref) const {
+ CHECK(StoresAggregationCounters()) << "Profile not prepared for aggregation counters";
+ const DexFileData* dex_data = FindDexData(method_ref.dex_file);
+ return dex_data == nullptr ? -1 : dex_data->GetMethodAggregationCounter(method_ref.index);
+}
+
+int32_t ProfileCompilationInfo::GetClassAggregationCounter(const TypeReference& type_ref) const {
+ CHECK(StoresAggregationCounters()) << "Profile not prepared for aggregation counters";
+ const DexFileData* dex_data = FindDexData(type_ref.dex_file);
+ return dex_data == nullptr ? -1 : dex_data->GetClassAggregationCounter(type_ref.index);
+}
+
+uint16_t ProfileCompilationInfo::GetAggregationCounter() const {
+ CHECK(StoresAggregationCounters()) << "Profile not prepared for aggregation counters";
+ return aggregation_count_;
+}
+
ProfileCompilationInfo::DexPcData*
ProfileCompilationInfo::FindOrAddDexPc(InlineCacheMap* inline_cache, uint32_t dex_pc) {
return &(inline_cache->FindOrAdd(dex_pc, DexPcData(&allocator_))->second);
@@ -2018,7 +2198,7 @@
<< type_idx.index_ << " in dex " << dex_file->GetLocation();
return HashSet<std::string>();
}
- const DexFile::TypeId& type_id = dex_file->GetTypeId(type_idx);
+ const dex::TypeId& type_id = dex_file->GetTypeId(type_idx);
ret.insert(dex_file->GetTypeDescriptor(type_id));
}
} else {
@@ -2096,4 +2276,46 @@
profile_key_map_.clear();
}
+bool ProfileCompilationInfo::StoresAggregationCounters() const {
+ return memcmp(version_, kProfileVersionWithCounters, sizeof(kProfileVersionWithCounters)) == 0;
+}
+
+void ProfileCompilationInfo::PrepareForAggregationCounters() {
+ InitProfileVersionInternal(kProfileVersionWithCounters);
+ for (DexFileData* dex_data : info_) {
+ dex_data->PrepareForAggregationCounters();
+ }
+}
+
+void ProfileCompilationInfo::DexFileData::PrepareForAggregationCounters() {
+ method_counters.resize(num_method_ids);
+ // TODO(calin): we should store the maximum number of types in the profile.
+ // It will simplify quite a few things and make this storage allocation
+ // more efficient.
+ size_t max_elems = 1 << (kBitsPerByte * sizeof(uint16_t));
+ class_counters.resize(max_elems);
+}
+
+const uint8_t* ProfileCompilationInfo::GetVersion() const {
+ return version_;
+}
+
+void ProfileCompilationInfo::InitProfileVersionInternal(const uint8_t version[]) {
+ CHECK(
+ (memcmp(version, kProfileVersion, kProfileVersionSize) == 0) ||
+ (memcmp(version, kProfileVersionWithCounters, kProfileVersionSize) == 0));
+ memcpy(version_, version, kProfileVersionSize);
+}
+
+uint16_t ProfileCompilationInfo::DexFileData::GetNumMethodCounters() const {
+ uint16_t num_method_counters = 0;
+ for (uint16_t method_idx = 0; method_idx < num_method_ids; method_idx++) {
+ num_method_counters += GetHotnessInfo(method_idx).IsInProfile() ? 1 : 0;
+ }
+ return num_method_counters;
+}
+
+bool ProfileCompilationInfo::DexFileData::ContainsClass(const dex::TypeIndex type_index) const {
+ return class_set.find(type_index) != class_set.end();
+}
} // namespace art
diff --git a/libprofile/profile/profile_compilation_info.h b/libprofile/profile/profile_compilation_info.h
index 92fa098..fa4615b 100644
--- a/libprofile/profile/profile_compilation_info.h
+++ b/libprofile/profile/profile_compilation_info.h
@@ -73,9 +73,10 @@
public:
static const uint8_t kProfileMagic[];
static const uint8_t kProfileVersion[];
-
+ static const uint8_t kProfileVersionWithCounters[];
static const char kDexMetadataProfileEntry[];
+ static constexpr size_t kProfileVersionSize = 4;
static constexpr uint8_t kIndividualInlineCacheSize = 5;
// Data structures for encoding the offline representation of inline caches.
@@ -447,6 +448,30 @@
// Clears all the data from the profile.
void ClearData();
+ // Prepare the profile to store aggregation counters.
+ // This will change the profile version and allocate extra storage for the counters.
+ // It allocates 2 bytes for every possible method and class, so do not use in performance
+ // critical code which needs to be memory efficient.
+ void PrepareForAggregationCounters();
+
+ // Returns true if the profile is configured to store aggregation counters.
+ bool StoresAggregationCounters() const;
+
+ // Returns the aggregation counter for the given method.
+ // Returns -1 if the method is not in the profile.
+ // CHECKs that the profile is configured to store aggregation counters.
+ int32_t GetMethodAggregationCounter(const MethodReference& method_ref) const;
+ // Returns the aggregation counter for the given class.
+ // Returns -1 if the class is not in the profile.
+ // CHECKs that the profile is configured to store aggregation counters.
+ int32_t GetClassAggregationCounter(const TypeReference& type_ref) const;
+ // Returns the number of times the profile was merged.
+ // CHECKs that the profile is configured to store aggregation counters.
+ uint16_t GetAggregationCounter() const;
+
+ // Return the version of this profile.
+ const uint8_t* GetVersion() const;
+
private:
enum ProfileLoadStatus {
kProfileLoadWouldOverwiteData,
@@ -470,7 +495,8 @@
const std::string& key,
uint32_t location_checksum,
uint16_t index,
- uint32_t num_methods)
+ uint32_t num_methods,
+ bool store_aggregation_counters)
: allocator_(allocator),
profile_key(key),
profile_index(index),
@@ -478,13 +504,18 @@
method_map(std::less<uint16_t>(), allocator->Adapter(kArenaAllocProfile)),
class_set(std::less<dex::TypeIndex>(), allocator->Adapter(kArenaAllocProfile)),
num_method_ids(num_methods),
- bitmap_storage(allocator->Adapter(kArenaAllocProfile)) {
+ bitmap_storage(allocator->Adapter(kArenaAllocProfile)),
+ method_counters(allocator->Adapter(kArenaAllocProfile)),
+ class_counters(allocator->Adapter(kArenaAllocProfile)) {
bitmap_storage.resize(ComputeBitmapStorage(num_method_ids));
if (!bitmap_storage.empty()) {
method_bitmap =
BitMemoryRegion(MemoryRegion(
&bitmap_storage[0], bitmap_storage.size()), 0, ComputeBitmapBits(num_method_ids));
}
+ if (store_aggregation_counters) {
+ PrepareForAggregationCounters();
+ }
}
static size_t ComputeBitmapBits(uint32_t num_method_ids) {
@@ -495,7 +526,13 @@
}
bool operator==(const DexFileData& other) const {
- return checksum == other.checksum && method_map == other.method_map;
+ return checksum == other.checksum &&
+ num_method_ids == other.num_method_ids &&
+ method_map == other.method_map &&
+ class_set == other.class_set &&
+ (BitMemoryRegion::Compare(method_bitmap, other.method_bitmap) == 0) &&
+ class_counters == other.class_counters &&
+ method_counters == other.method_counters;
}
// Mark a method as executed at least once.
@@ -510,6 +547,14 @@
void SetMethodHotness(size_t index, MethodHotness::Flag flags);
MethodHotness GetHotnessInfo(uint32_t dex_method_index) const;
+ void PrepareForAggregationCounters();
+
+ int32_t GetMethodAggregationCounter(uint16_t method_index) const;
+ int32_t GetClassAggregationCounter(uint16_t type_index) const;
+
+ uint16_t GetNumMethodCounters() const;
+
+ bool ContainsClass(const dex::TypeIndex type_index) const;
// The allocator used to allocate new inline cache maps.
ArenaAllocator* const allocator_;
@@ -519,7 +564,7 @@
uint8_t profile_index;
// The dex checksum.
uint32_t checksum;
- // The methonds' profile information.
+ // The methods' profile information.
MethodMap method_map;
// The classes which have been profiled. Note that these don't necessarily include
// all the classes that can be found in the inline caches reference.
@@ -531,6 +576,8 @@
uint32_t num_method_ids;
ArenaVector<uint8_t> bitmap_storage;
BitMemoryRegion method_bitmap;
+ ArenaVector<uint16_t> method_counters;
+ ArenaVector<uint16_t> class_counters;
private:
enum BitmapIndex {
@@ -761,6 +808,11 @@
const SafeMap<uint8_t, uint8_t>& dex_profile_index_remap,
/*out*/std::string* error);
+ // Read the aggregation counters from the buffer.
+ bool ReadAggregationCounters(SafeBuffer& buffer,
+ DexFileData& dex_data,
+ /*out*/std::string* error);
+
// The method generates mapping of profile indices while merging a new profile
// data into current data. It returns true, if the mapping was successful.
bool RemapProfileIndex(const std::vector<ProfileLineHeader>& profile_line_headers,
@@ -792,6 +844,9 @@
// if no previous data exists.
DexPcData* FindOrAddDexPc(InlineCacheMap* inline_cache, uint32_t dex_pc);
+ // Initializes the profile version to the desired one.
+ void InitProfileVersionInternal(const uint8_t version[]);
+
friend class ProfileCompilationInfoTest;
friend class CompilerDriverProfileTest;
friend class ProfileAssistantTest;
@@ -809,6 +864,14 @@
// This is used to speed up searches since it avoids iterating
// over the info_ vector when searching by profile key.
ArenaSafeMap<const std::string, uint8_t> profile_key_map_;
+
+ // The version of the profile.
+ // This may change if a "normal" profile is transformed to keep track
+ // of aggregation counters.
+ uint8_t version_[kProfileVersionSize];
+
+ // Stored only when the profile is configured to keep track of aggregation counters.
+ uint16_t aggregation_count_;
};
} // namespace art
diff --git a/libprofile/profile/profile_compilation_info_test.cc b/libprofile/profile/profile_compilation_info_test.cc
index a2bfe50..47019c4 100644
--- a/libprofile/profile/profile_compilation_info_test.cc
+++ b/libprofile/profile/profile_compilation_info_test.cc
@@ -1141,4 +1141,180 @@
ASSERT_TRUE(loaded_info.Equals(info));
}
+TEST_F(ProfileCompilationInfoTest, PrepareForAggregationCounters) {
+ ProfileCompilationInfo info;
+ ASSERT_EQ(
+ memcmp(info.GetVersion(),
+ ProfileCompilationInfo::kProfileVersion,
+ ProfileCompilationInfo::kProfileVersionSize),
+ 0);
+
+ info.PrepareForAggregationCounters();
+
+ ASSERT_EQ(
+ memcmp(info.GetVersion(),
+ ProfileCompilationInfo::kProfileVersionWithCounters,
+ ProfileCompilationInfo::kProfileVersionSize),
+ 0);
+ ASSERT_TRUE(info.StoresAggregationCounters());
+ ASSERT_EQ(info.GetAggregationCounter(), 0);
+}
+
+TEST_F(ProfileCompilationInfoTest, MergeWithAggregationCounters) {
+ ProfileCompilationInfo info1;
+ info1.PrepareForAggregationCounters();
+
+ ProfileCompilationInfo info2;
+ ProfileCompilationInfo info3;
+
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+ std::string location = dex->GetLocation();
+ int checksum = dex->GetLocationChecksum();
+
+ AddMethod(location, checksum, /* method_idx= */ 1, &info1);
+
+ AddMethod(location, checksum, /* method_idx= */ 2, &info1);
+ AddMethod(location, checksum, /* method_idx= */ 2, &info2);
+
+ info1.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
+ info2.AddMethodIndex(Hotness::kFlagPostStartup, location, checksum, 3, kMaxMethodIds);
+ info3.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
+
+ AddMethod(location, checksum, /* method_idx= */ 6, &info2);
+ AddMethod(location, checksum, /* method_idx= */ 6, &info3);
+
+ AddClass(location, checksum, dex::TypeIndex(10), &info1);
+
+ AddClass(location, checksum, dex::TypeIndex(20), &info1);
+ AddClass(location, checksum, dex::TypeIndex(20), &info2);
+
+ AddClass(location, checksum, dex::TypeIndex(30), &info1);
+ AddClass(location, checksum, dex::TypeIndex(30), &info2);
+ AddClass(location, checksum, dex::TypeIndex(30), &info3);
+
+ ASSERT_EQ(info1.GetAggregationCounter(), 0);
+ info1.MergeWith(info2);
+ ASSERT_EQ(info1.GetAggregationCounter(), 1);
+ info1.MergeWith(info3);
+ ASSERT_EQ(info1.GetAggregationCounter(), 2);
+
+ ASSERT_EQ(0, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 1)));
+ ASSERT_EQ(1, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 2)));
+ ASSERT_EQ(2, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 3)));
+ ASSERT_EQ(1, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 6)));
+
+ ASSERT_EQ(0, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(10))));
+ ASSERT_EQ(1, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(20))));
+ ASSERT_EQ(2, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(30))));
+
+ // Check methods and classes that do not exist.
+ ASSERT_EQ(-1, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 4)));
+ ASSERT_EQ(-1, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(40))));
+}
+
+TEST_F(ProfileCompilationInfoTest, SaveAndLoadAggregationCounters) {
+ ProfileCompilationInfo info1;
+ info1.PrepareForAggregationCounters();
+
+ ProfileCompilationInfo info2;
+ ProfileCompilationInfo info3;
+
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+ std::string location = dex->GetLocation();
+ int checksum = dex->GetLocationChecksum();
+
+ AddMethod(location, checksum, /* method_idx= */ 1, &info1);
+
+ AddMethod(location, checksum, /* method_idx= */ 2, &info1);
+ AddMethod(location, checksum, /* method_idx= */ 2, &info2);
+
+ info1.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
+ info2.AddMethodIndex(Hotness::kFlagPostStartup, location, checksum, 3, kMaxMethodIds);
+ info3.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
+
+ AddMethod(location, checksum, /* method_idx= */ 6, &info2);
+ AddMethod(location, checksum, /* method_idx= */ 6, &info3);
+
+ AddClass(location, checksum, dex::TypeIndex(10), &info1);
+
+ AddClass(location, checksum, dex::TypeIndex(20), &info1);
+ AddClass(location, checksum, dex::TypeIndex(20), &info2);
+
+ AddClass(location, checksum, dex::TypeIndex(30), &info1);
+ AddClass(location, checksum, dex::TypeIndex(30), &info2);
+ AddClass(location, checksum, dex::TypeIndex(30), &info3);
+
+ info1.MergeWith(info2);
+ info1.MergeWith(info3);
+
+ ScratchFile profile;
+
+ ASSERT_TRUE(info1.Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+
+ // Check that we get back what we saved.
+ ProfileCompilationInfo loaded_info;
+ loaded_info.PrepareForAggregationCounters();
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+ ASSERT_TRUE(loaded_info.Load(GetFd(profile)));
+ ASSERT_TRUE(loaded_info.Equals(info1));
+
+ ASSERT_EQ(2, loaded_info.GetAggregationCounter());
+
+ ASSERT_EQ(0, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 1)));
+ ASSERT_EQ(1, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 2)));
+ ASSERT_EQ(2, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 3)));
+ ASSERT_EQ(1, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 6)));
+
+ ASSERT_EQ(0, loaded_info.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(10))));
+ ASSERT_EQ(1, loaded_info.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(20))));
+ ASSERT_EQ(2, loaded_info.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(30))));
+}
+
+TEST_F(ProfileCompilationInfoTest, MergeTwoWithAggregationCounters) {
+ ProfileCompilationInfo info1;
+ info1.PrepareForAggregationCounters();
+
+ ProfileCompilationInfo info2;
+
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+ std::string location = dex->GetLocation();
+ int checksum = dex->GetLocationChecksum();
+
+ AddMethod(location, checksum, /* method_idx= */ 1, &info1);
+
+ AddMethod(location, checksum, /* method_idx= */ 2, &info1);
+ AddMethod(location, checksum, /* method_idx= */ 2, &info2);
+
+ AddClass(location, checksum, dex::TypeIndex(20), &info1);
+
+ AddClass(location, checksum, dex::TypeIndex(10), &info1);
+ AddClass(location, checksum, dex::TypeIndex(10), &info2);
+
+ info1.MergeWith(info2);
+ info1.MergeWith(info2);
+ ASSERT_EQ(2, info1.GetAggregationCounter());
+
+ // Save and load the profile to create a copy of the data
+ ScratchFile profile;
+ info1.Save(GetFd(profile));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+
+ ProfileCompilationInfo loaded_info;
+ loaded_info.PrepareForAggregationCounters();
+ profile.GetFile()->ResetOffset();
+ loaded_info.Load(GetFd(profile));
+
+ // Merge the data
+ info1.MergeWith(loaded_info);
+
+ ASSERT_EQ(4, info1.GetAggregationCounter());
+
+ ASSERT_EQ(0, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 1)));
+ ASSERT_EQ(4, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 2)));
+
+ ASSERT_EQ(4, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(10))));
+ ASSERT_EQ(0, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(20))));
+}
+
} // namespace art
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index d2a5bb8..4e1276e 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -296,7 +296,7 @@
const DexFile& dex_file,
uint32_t class_def_index,
uint32_t dex_method_index,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t method_access_flags) {
if ((method_access_flags & kAccAbstract) != 0) {
// Abstract method, no code.
@@ -723,7 +723,7 @@
<< "': " << error_msg;
} else {
const char* descriptor = m->GetDeclaringClassDescriptor();
- const DexFile::ClassDef* class_def =
+ const dex::ClassDef* class_def =
OatDexFile::FindClassDef(*dex_file, descriptor, ComputeModifiedUtf8Hash(descriptor));
if (class_def != nullptr) {
uint16_t class_def_index = dex_file->GetIndexForClassDef(*class_def);
@@ -1092,12 +1092,12 @@
static constexpr uint32_t kMaxCodeSize = 100 * 1000;
bool DumpOatMethod(VariableIndentationOutputStream* vios,
- const DexFile::ClassDef& class_def,
+ const dex::ClassDef& class_def,
uint32_t class_method_index,
const OatFile::OatClass& oat_class,
const DexFile& dex_file,
uint32_t dex_method_idx,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t method_access_flags,
bool* addr_found) {
bool success = true;
@@ -1490,8 +1490,8 @@
StackHandleScope<1>* hs,
uint32_t dex_method_idx,
const DexFile* dex_file,
- const DexFile::ClassDef& class_def,
- const DexFile::CodeItem* code_item,
+ const dex::ClassDef& class_def,
+ const dex::CodeItem* code_item,
uint32_t method_access_flags) {
if ((method_access_flags & kAccNative) == 0) {
ScopedObjectAccess soa(Thread::Current());
@@ -3007,7 +3007,7 @@
for (uint32_t class_def_index = 0;
class_def_index != dex_file->NumClassDefs();
++class_def_index) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
+ const dex::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const char* descriptor = dex_file->GetClassDescriptor(class_def);
h_klass.Assign(class_linker->FindClass(self, descriptor, h_class_loader));
if (h_klass == nullptr) {
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index 0c07f56..a8e220c 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -162,9 +162,9 @@
art::Handle<art::mirror::Class> klass,
art::Handle<art::mirror::ClassLoader> class_loader,
const art::DexFile& initial_dex_file,
- const art::DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
+ const art::dex::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
/*out*/art::DexFile const** final_dex_file,
- /*out*/art::DexFile::ClassDef const** final_class_def)
+ /*out*/art::dex::ClassDef const** final_class_def)
override REQUIRES_SHARED(art::Locks::mutator_lock_) {
bool is_enabled =
event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassFileLoadHookRetransformable) ||
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 6ca4e38..4bcb7b2 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -49,6 +49,7 @@
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_file_types.h"
+#include "dex/signature-inl.h"
#include "events-inl.h"
#include "gc/allocation_listener.h"
#include "gc/heap.h"
@@ -629,7 +630,7 @@
// and removals. We should have already checked the fields.
for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
// Get the data on the method we are searching for
- const art::DexFile::MethodId& new_method_id = dex_file_->GetMethodId(method.GetIndex());
+ const art::dex::MethodId& new_method_id = dex_file_->GetMethodId(method.GetIndex());
const char* new_method_name = dex_file_->GetMethodName(new_method_id);
art::Signature new_method_signature = dex_file_->GetMethodSignature(new_method_id);
art::ArtMethod* old_method = FindMethod(h_klass, new_method_name, new_method_signature);
@@ -672,7 +673,7 @@
auto old_iter = old_fields.begin();
for (const art::ClassAccessor::Field& new_field : new_accessor.GetFields()) {
// Get the data on the method we are searching for
- const art::DexFile::FieldId& new_field_id = dex_file_->GetFieldId(new_field.GetIndex());
+ const art::dex::FieldId& new_field_id = dex_file_->GetFieldId(new_field.GetIndex());
const char* new_field_name = dex_file_->GetFieldName(new_field_id);
const char* new_field_type = dex_file_->GetFieldTypeDescriptor(new_field_id);
@@ -685,7 +686,7 @@
return false;
}
- const art::DexFile::FieldId& old_field_id = old_dex_file.GetFieldId(old_iter->GetIndex());
+ const art::dex::FieldId& old_field_id = old_dex_file.GetFieldId(old_iter->GetIndex());
const char* old_field_name = old_dex_file.GetFieldName(old_field_id);
const char* old_field_type = old_dex_file.GetFieldTypeDescriptor(old_field_id);
@@ -736,7 +737,7 @@
}
// Get the ClassDef from the new DexFile.
// Since the dex file has only a single class def the index is always 0.
- const art::DexFile::ClassDef& def = dex_file_->GetClassDef(0);
+ const art::dex::ClassDef& def = dex_file_->GetClassDef(0);
// Get the class as it is now.
art::Handle<art::mirror::Class> current_class(hs.NewHandle(GetMirrorClass()));
@@ -773,7 +774,7 @@
return false;
}
}
- const art::DexFile::TypeList* interfaces = dex_file_->GetInterfacesList(def);
+ const art::dex::TypeList* interfaces = dex_file_->GetInterfacesList(def);
if (interfaces == nullptr) {
if (current_class->NumDirectInterfaces() != 0) {
RecordFailure(ERR(UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED), "Interfaces added");
@@ -781,7 +782,7 @@
}
} else {
DCHECK(!current_class->IsProxyClass());
- const art::DexFile::TypeList* current_interfaces = current_class->GetInterfaceTypeList();
+ const art::dex::TypeList* current_interfaces = current_class->GetInterfaceTypeList();
if (current_interfaces == nullptr || current_interfaces->Size() != interfaces->Size()) {
RecordFailure(ERR(UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED), "Interfaces added or removed");
return false;
@@ -1394,14 +1395,14 @@
}
void Redefiner::ClassRedefinition::UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
- const art::DexFile::ClassDef& class_def) {
+ const art::dex::ClassDef& class_def) {
art::ClassLinker* linker = driver_->runtime_->GetClassLinker();
art::PointerSize image_pointer_size = linker->GetImagePointerSize();
- const art::DexFile::TypeId& declaring_class_id = dex_file_->GetTypeId(class_def.class_idx_);
+ const art::dex::TypeId& declaring_class_id = dex_file_->GetTypeId(class_def.class_idx_);
const art::DexFile& old_dex_file = mclass->GetDexFile();
// Update methods.
for (art::ArtMethod& method : mclass->GetDeclaredMethods(image_pointer_size)) {
- const art::DexFile::StringId* new_name_id = dex_file_->FindStringId(method.GetName());
+ const art::dex::StringId* new_name_id = dex_file_->FindStringId(method.GetName());
art::dex::TypeIndex method_return_idx =
dex_file_->GetIndexForTypeId(*dex_file_->FindTypeId(method.GetReturnTypeDescriptor()));
const auto* old_type_list = method.GetParameterTypeList();
@@ -1414,12 +1415,11 @@
old_dex_file.GetTypeId(
old_type_list->GetTypeItem(i).type_idx_)))));
}
- const art::DexFile::ProtoId* proto_id = dex_file_->FindProtoId(method_return_idx,
- new_type_list);
+ const art::dex::ProtoId* proto_id = dex_file_->FindProtoId(method_return_idx, new_type_list);
CHECK(proto_id != nullptr || old_type_list == nullptr);
- const art::DexFile::MethodId* method_id = dex_file_->FindMethodId(declaring_class_id,
- *new_name_id,
- *proto_id);
+ const art::dex::MethodId* method_id = dex_file_->FindMethodId(declaring_class_id,
+ *new_name_id,
+ *proto_id);
CHECK(method_id != nullptr);
uint32_t dex_method_idx = dex_file_->GetIndexForMethodId(*method_id);
method.SetDexMethodIndex(dex_method_idx);
@@ -1435,12 +1435,12 @@
for (auto fields_iter : {mclass->GetIFields(), mclass->GetSFields()}) {
for (art::ArtField& field : fields_iter) {
std::string declaring_class_name;
- const art::DexFile::TypeId* new_declaring_id =
+ const art::dex::TypeId* new_declaring_id =
dex_file_->FindTypeId(field.GetDeclaringClass()->GetDescriptor(&declaring_class_name));
- const art::DexFile::StringId* new_name_id = dex_file_->FindStringId(field.GetName());
- const art::DexFile::TypeId* new_type_id = dex_file_->FindTypeId(field.GetTypeDescriptor());
+ const art::dex::StringId* new_name_id = dex_file_->FindStringId(field.GetName());
+ const art::dex::TypeId* new_type_id = dex_file_->FindTypeId(field.GetTypeDescriptor());
CHECK(new_name_id != nullptr && new_type_id != nullptr && new_declaring_id != nullptr);
- const art::DexFile::FieldId* new_field_id =
+ const art::dex::FieldId* new_field_id =
dex_file_->FindFieldId(*new_declaring_id, *new_name_id, *new_type_id);
CHECK(new_field_id != nullptr);
// We only need to update the index since the other data in the ArtField cannot be updated.
@@ -1455,7 +1455,7 @@
art::ObjPtr<art::mirror::DexCache> new_dex_cache,
art::ObjPtr<art::mirror::Object> original_dex_file) {
DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
- const art::DexFile::ClassDef& class_def = dex_file_->GetClassDef(0);
+ const art::dex::ClassDef& class_def = dex_file_->GetClassDef(0);
UpdateMethods(mclass, class_def);
UpdateFields(mclass);
diff --git a/openjdkjvmti/ti_redefine.h b/openjdkjvmti/ti_redefine.h
index f4a4280..a974dc1 100644
--- a/openjdkjvmti/ti_redefine.h
+++ b/openjdkjvmti/ti_redefine.h
@@ -39,13 +39,19 @@
#include "art_jvmti.h"
#include "base/array_ref.h"
#include "base/globals.h"
-#include "dex/dex_file.h"
#include "jni/jni_env_ext-inl.h"
#include "jvmti.h"
#include "mirror/array.h"
#include "mirror/class.h"
#include "obj_ptr.h"
+namespace art {
+namespace dex {
+struct ClassDef;
+} // namespace dex
+class DexFile;
+} // namespace art
+
namespace openjdkjvmti {
class ArtClassDefinition;
@@ -172,7 +178,7 @@
REQUIRES(art::Locks::mutator_lock_);
void UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
- const art::DexFile::ClassDef& class_def)
+ const art::dex::ClassDef& class_def)
REQUIRES(art::Locks::mutator_lock_);
void UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
diff --git a/profman/profile_assistant.cc b/profman/profile_assistant.cc
index 4dc5262..b65bb43 100644
--- a/profman/profile_assistant.cc
+++ b/profman/profile_assistant.cc
@@ -32,7 +32,8 @@
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
const std::vector<ScopedFlock>& profile_files,
const ScopedFlock& reference_profile_file,
- const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
+ bool store_aggregation_counters) {
DCHECK(!profile_files.empty());
ProfileCompilationInfo info;
@@ -42,6 +43,12 @@
return kErrorBadProfiles;
}
+ // If we need to store aggregation counters (e.g. for the boot image profile),
+ // prepare the reference profile now.
+ if (store_aggregation_counters) {
+ info.PrepareForAggregationCounters();
+ }
+
// Store the current state of the reference profile before merging with the current profiles.
uint32_t number_of_methods = info.GetNumberOfMethods();
uint32_t number_of_classes = info.GetNumberOfResolvedClasses();
@@ -124,7 +131,8 @@
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
const std::vector<int>& profile_files_fd,
int reference_profile_file_fd,
- const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
+ bool store_aggregation_counters) {
DCHECK_GE(reference_profile_file_fd, 0);
std::string error;
@@ -147,13 +155,15 @@
return ProcessProfilesInternal(profile_files.Get(),
reference_profile_file,
- filter_fn);
+ filter_fn,
+ store_aggregation_counters);
}
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
const std::vector<std::string>& profile_files,
const std::string& reference_profile_file,
- const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
+ bool store_aggregation_counters) {
std::string error;
ScopedFlockList profile_files_list(profile_files.size());
@@ -171,7 +181,8 @@
return ProcessProfilesInternal(profile_files_list.Get(),
locked_reference_profile_file,
- filter_fn);
+ filter_fn,
+ store_aggregation_counters);
}
} // namespace art
diff --git a/profman/profile_assistant.h b/profman/profile_assistant.h
index c1d6f8e..45d4e38 100644
--- a/profman/profile_assistant.h
+++ b/profman/profile_assistant.h
@@ -55,19 +55,22 @@
const std::vector<std::string>& profile_files,
const std::string& reference_profile_file,
const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
- = ProfileCompilationInfo::ProfileFilterFnAcceptAll);
+ = ProfileCompilationInfo::ProfileFilterFnAcceptAll,
+ bool store_aggregation_counters = false);
static ProcessingResult ProcessProfiles(
const std::vector<int>& profile_files_fd_,
int reference_profile_file_fd,
const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
- = ProfileCompilationInfo::ProfileFilterFnAcceptAll);
+ = ProfileCompilationInfo::ProfileFilterFnAcceptAll,
+ bool store_aggregation_counters = false);
private:
static ProcessingResult ProcessProfilesInternal(
const std::vector<ScopedFlock>& profile_files,
const ScopedFlock& reference_profile_file,
- const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn);
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
+ bool store_aggregation_counters);
DISALLOW_COPY_AND_ASSIGN(ProfileAssistant);
};
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index e9d3290..e906151 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -102,7 +102,7 @@
}
}
for (uint16_t i = 0; i < number_of_classes; i++) {
- ASSERT_TRUE(info->AddClassIndex(dex_location1,
+ ASSERT_TRUE(info->AddClassIndex(ProfileCompilationInfo::GetProfileDexFileKey(dex_location1),
dex_location_checksum1,
dex::TypeIndex(i),
number_of_methods1));
@@ -1300,4 +1300,57 @@
}
}
+TEST_F(ProfileAssistantTest, MergeProfilesWithCounters) {
+ ScratchFile profile1;
+ ScratchFile profile2;
+ ScratchFile reference_profile;
+
+ // The new profile info will contain methods with indices 0-100.
+ const uint16_t kNumberOfMethodsToEnableCompilation = 100;
+ const uint16_t kNumberOfClasses = 50;
+
+ std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("ProfileTestMultiDex");
+ const DexFile& d1 = *dex_files[0];
+ const DexFile& d2 = *dex_files[1];
+ ProfileCompilationInfo info1;
+ SetupProfile(
+ d1.GetLocation(), d1.GetLocationChecksum(),
+ d2.GetLocation(), d2.GetLocationChecksum(),
+ kNumberOfMethodsToEnableCompilation, kNumberOfClasses, profile1, &info1);
+ ProfileCompilationInfo info2;
+ SetupProfile(
+ d1.GetLocation(), d1.GetLocationChecksum(),
+ d2.GetLocation(), d2.GetLocationChecksum(),
+ kNumberOfMethodsToEnableCompilation, kNumberOfClasses, profile2, &info2);
+
+ std::string profman_cmd = GetProfmanCmd();
+ std::vector<std::string> argv_str;
+ argv_str.push_back(profman_cmd);
+ argv_str.push_back("--profile-file-fd=" + std::to_string(profile1.GetFd()));
+ argv_str.push_back("--profile-file-fd=" + std::to_string(profile2.GetFd()));
+ argv_str.push_back("--reference-profile-file-fd=" + std::to_string(reference_profile.GetFd()));
+ argv_str.push_back("--store-aggregation-counters");
+ std::string error;
+
+ EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0) << error;
+
+ // Verify that we can load the result and that the counters are in place.
+
+ ProfileCompilationInfo result;
+ result.PrepareForAggregationCounters();
+ ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
+ ASSERT_TRUE(result.Load(reference_profile.GetFd()));
+
+ ASSERT_TRUE(result.StoresAggregationCounters());
+ ASSERT_EQ(2, result.GetAggregationCounter());
+
+ for (uint16_t i = 0; i < kNumberOfMethodsToEnableCompilation; i++) {
+ ASSERT_EQ(1, result.GetMethodAggregationCounter(MethodReference(&d1, i)));
+ ASSERT_EQ(1, result.GetMethodAggregationCounter(MethodReference(&d2, i)));
+ }
+ for (uint16_t i = 0; i < kNumberOfClasses; i++) {
+ ASSERT_EQ(1, result.GetClassAggregationCounter(TypeReference(&d1, dex::TypeIndex(i))));
+ }
+}
+
} // namespace art
diff --git a/profman/profman.cc b/profman/profman.cc
index 2935a05..88c5c4e 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -157,6 +157,9 @@
UsageError(" the file passed with --profile-fd(file) to the profile passed with");
UsageError(" --reference-profile-fd(file) and update at the same time the profile-key");
UsageError(" of entries corresponding to the apks passed with --apk(-fd).");
+ UsageError(" --store-aggregation-counters: if present, profman will compute and store");
+ UsageError(" the aggregation counters of classes and methods in the output profile.");
+ UsageError(" In this case the profile will have a different version.");
UsageError("");
exit(EXIT_FAILURE);
@@ -200,7 +203,8 @@
test_profile_class_percentage_(kDefaultTestProfileClassPercentage),
test_profile_seed_(NanoTime()),
start_ns_(NanoTime()),
- copy_and_update_profile_key_(false) {}
+ copy_and_update_profile_key_(false),
+ store_aggregation_counters_(false) {}
~ProfMan() {
LogCompletionTime();
@@ -287,6 +291,8 @@
ParseUintOption(option, "--generate-test-profile-seed", &test_profile_seed_, Usage);
} else if (option.starts_with("--copy-and-update-profile-key")) {
copy_and_update_profile_key_ = true;
+ } else if (option.starts_with("--store-aggregation-counters")) {
+ store_aggregation_counters_ = true;
} else {
Usage("Unknown argument '%s'", option.data());
}
@@ -363,12 +369,14 @@
File file(reference_profile_file_fd_, false);
result = ProfileAssistant::ProcessProfiles(profile_files_fd_,
reference_profile_file_fd_,
- filter_fn);
+ filter_fn,
+ store_aggregation_counters_);
CloseAllFds(profile_files_fd_, "profile_files_fd_");
} else {
result = ProfileAssistant::ProcessProfiles(profile_files_,
reference_profile_file_,
- filter_fn);
+ filter_fn,
+ store_aggregation_counters_);
}
return result;
}
@@ -607,14 +615,14 @@
&startup_methods,
&post_startup_methods)) {
for (const dex::TypeIndex& type_index : class_types) {
- const DexFile::TypeId& type_id = dex_file->GetTypeId(type_index);
+ const dex::TypeId& type_id = dex_file->GetTypeId(type_index);
out_lines->insert(std::string(dex_file->GetTypeDescriptor(type_id)));
}
combined_methods = hot_methods;
combined_methods.insert(startup_methods.begin(), startup_methods.end());
combined_methods.insert(post_startup_methods.begin(), post_startup_methods.end());
for (uint16_t dex_method_idx : combined_methods) {
- const DexFile::MethodId& id = dex_file->GetMethodId(dex_method_idx);
+ const dex::MethodId& id = dex_file->GetMethodId(dex_method_idx);
std::string signature_string(dex_file->GetMethodSignature(id).ToString());
std::string type_string(dex_file->GetTypeDescriptor(dex_file->GetTypeId(id.class_idx_)));
std::string method_name(dex_file->GetMethodName(id));
@@ -774,7 +782,7 @@
}
}
- const DexFile::TypeId* type_id = dex_file->FindTypeId(klass_descriptor.c_str());
+ const dex::TypeId* type_id = dex_file->FindTypeId(klass_descriptor.c_str());
if (type_id == nullptr) {
continue;
}
@@ -810,7 +818,7 @@
const std::string& name = name_and_signature[0];
const std::string& signature = kProfileParsingFirstCharInSignature + name_and_signature[1];
- const DexFile::StringId* name_id = dex_file->FindStringId(name.c_str());
+ const dex::StringId* name_id = dex_file->FindStringId(name.c_str());
if (name_id == nullptr) {
LOG(WARNING) << "Could not find name: " << name;
return dex::kDexNoIndex;
@@ -821,12 +829,12 @@
LOG(WARNING) << "Could not create type list" << signature;
return dex::kDexNoIndex;
}
- const DexFile::ProtoId* proto_id = dex_file->FindProtoId(return_type_idx, param_type_idxs);
+ const dex::ProtoId* proto_id = dex_file->FindProtoId(return_type_idx, param_type_idxs);
if (proto_id == nullptr) {
LOG(WARNING) << "Could not find proto_id: " << name;
return dex::kDexNoIndex;
}
- const DexFile::MethodId* method_id = dex_file->FindMethodId(
+ const dex::MethodId* method_id = dex_file->FindMethodId(
dex_file->GetTypeId(class_ref.TypeIndex()), *name_id, *proto_id);
if (method_id == nullptr) {
LOG(WARNING) << "Could not find method_id: " << name;
@@ -849,7 +857,7 @@
uint32_t offset = dex_file->FindCodeItemOffset(
*dex_file->FindClassDef(class_ref.TypeIndex()),
method_index);
- const DexFile::CodeItem* code_item = dex_file->GetCodeItem(offset);
+ const dex::CodeItem* code_item = dex_file->GetCodeItem(offset);
bool found_invoke = false;
for (const DexInstructionPcPair& inst : CodeItemInstructionAccessor(*dex_file, code_item)) {
@@ -1279,6 +1287,7 @@
uint32_t test_profile_seed_;
uint64_t start_ns_;
bool copy_and_update_profile_key_;
+ bool store_aggregation_counters_;
};
// See ProfileAssistant::ProcessingResult for return codes.
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index 53e4c11..6f976d1 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -29,7 +29,6 @@
#include "jvalue.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
-#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
namespace art {
@@ -291,7 +290,7 @@
return field_index == 0 ? "[Ljava/lang/Class;" : "[[Ljava/lang/Class;";
}
const DexFile* dex_file = GetDexFile();
- const DexFile::FieldId& field_id = dex_file->GetFieldId(field_index);
+ const dex::FieldId& field_id = dex_file->GetFieldId(field_index);
return dex_file->GetFieldTypeDescriptor(field_id);
}
@@ -342,7 +341,7 @@
inline ObjPtr<mirror::String> ArtField::ResolveNameString() {
uint32_t dex_field_index = GetDexFieldIndex();
CHECK_NE(dex_field_index, dex::kDexNoIndex);
- const DexFile::FieldId& field_id = GetDexFile()->GetFieldId(dex_field_index);
+ const dex::FieldId& field_id = GetDexFile()->GetFieldId(dex_field_index);
return Runtime::Current()->GetClassLinker()->ResolveString(field_id.name_idx_, this);
}
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index fda269c..e28ffa2 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -43,7 +43,6 @@
#include "quick/quick_method_frame_info.h"
#include "read_barrier-inl.h"
#include "runtime-inl.h"
-#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
namespace art {
@@ -224,11 +223,11 @@
inline ObjPtr<mirror::String> ArtMethod::ResolveNameString() {
DCHECK(!IsProxyMethod());
- const DexFile::MethodId& method_id = GetDexFile()->GetMethodId(GetDexMethodIndex());
+ const dex::MethodId& method_id = GetDexFile()->GetMethodId(GetDexMethodIndex());
return Runtime::Current()->GetClassLinker()->ResolveString(method_id.name_idx_, this);
}
-inline const DexFile::CodeItem* ArtMethod::GetCodeItem() {
+inline const dex::CodeItem* ArtMethod::GetCodeItem() {
return GetDexFile()->GetCodeItem(GetCodeItemOffset());
}
@@ -245,16 +244,16 @@
return annotations::GetLineNumFromPC(GetDexFile(), this, dex_pc);
}
-inline const DexFile::ProtoId& ArtMethod::GetPrototype() {
+inline const dex::ProtoId& ArtMethod::GetPrototype() {
DCHECK(!IsProxyMethod());
const DexFile* dex_file = GetDexFile();
return dex_file->GetMethodPrototype(dex_file->GetMethodId(GetDexMethodIndex()));
}
-inline const DexFile::TypeList* ArtMethod::GetParameterTypeList() {
+inline const dex::TypeList* ArtMethod::GetParameterTypeList() {
DCHECK(!IsProxyMethod());
const DexFile* dex_file = GetDexFile();
- const DexFile::ProtoId& proto = dex_file->GetMethodPrototype(
+ const dex::ProtoId& proto = dex_file->GetMethodPrototype(
dex_file->GetMethodId(GetDexMethodIndex()));
return dex_file->GetProtoParameters(proto);
}
@@ -273,7 +272,7 @@
}
}
-inline const DexFile::ClassDef& ArtMethod::GetClassDef() {
+inline const dex::ClassDef& ArtMethod::GetClassDef() {
DCHECK(!IsProxyMethod());
return GetDexFile()->GetClassDef(GetClassDefIndex());
}
@@ -344,8 +343,8 @@
inline dex::TypeIndex ArtMethod::GetReturnTypeIndex() {
DCHECK(!IsProxyMethod());
const DexFile* dex_file = GetDexFile();
- const DexFile::MethodId& method_id = dex_file->GetMethodId(GetDexMethodIndex());
- const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+ const dex::MethodId& method_id = dex_file->GetMethodId(GetDexMethodIndex());
+ const dex::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
return proto_id.return_type_idx_;
}
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 5f5361a..e273d94 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -31,6 +31,7 @@
#include "dex/dex_file-inl.h"
#include "dex/dex_file_exception_helpers.h"
#include "dex/dex_instruction.h"
+#include "dex/signature-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "hidden_api.h"
@@ -133,7 +134,7 @@
DCHECK(IsObsolete());
const DexFile* dex_file = GetDexFile();
const dex::TypeIndex declaring_class_type = dex_file->GetMethodId(GetDexMethodIndex()).class_idx_;
- const DexFile::ClassDef* class_def = dex_file->FindClassDef(declaring_class_type);
+ const dex::ClassDef* class_def = dex_file->FindClassDef(declaring_class_type);
CHECK(class_def != nullptr);
return dex_file->GetIndexForClassDef(*class_def);
}
@@ -182,14 +183,14 @@
bool ArtMethod::HasSameNameAndSignature(ArtMethod* other) {
ScopedAssertNoThreadSuspension ants("HasSameNameAndSignature");
const DexFile* dex_file = GetDexFile();
- const DexFile::MethodId& mid = dex_file->GetMethodId(GetDexMethodIndex());
+ const dex::MethodId& mid = dex_file->GetMethodId(GetDexMethodIndex());
if (GetDexCache() == other->GetDexCache()) {
- const DexFile::MethodId& mid2 = dex_file->GetMethodId(other->GetDexMethodIndex());
+ const dex::MethodId& mid2 = dex_file->GetMethodId(other->GetDexMethodIndex());
return mid.name_idx_ == mid2.name_idx_ && mid.proto_idx_ == mid2.proto_idx_;
}
const DexFile* dex_file2 = other->GetDexFile();
- const DexFile::MethodId& mid2 = dex_file2->GetMethodId(other->GetDexMethodIndex());
- if (!DexFileStringEquals(dex_file, mid.name_idx_, dex_file2, mid2.name_idx_)) {
+ const dex::MethodId& mid2 = dex_file2->GetMethodId(other->GetDexMethodIndex());
+ if (!DexFile::StringEquals(dex_file, mid.name_idx_, dex_file2, mid2.name_idx_)) {
return false; // Name mismatch.
}
return dex_file->GetMethodSignature(mid) == dex_file2->GetMethodSignature(mid2);
@@ -235,17 +236,17 @@
uint32_t name_and_signature_idx) {
const DexFile* dexfile = GetDexFile();
const uint32_t dex_method_idx = GetDexMethodIndex();
- const DexFile::MethodId& mid = dexfile->GetMethodId(dex_method_idx);
- const DexFile::MethodId& name_and_sig_mid = other_dexfile.GetMethodId(name_and_signature_idx);
+ const dex::MethodId& mid = dexfile->GetMethodId(dex_method_idx);
+ const dex::MethodId& name_and_sig_mid = other_dexfile.GetMethodId(name_and_signature_idx);
DCHECK_STREQ(dexfile->GetMethodName(mid), other_dexfile.GetMethodName(name_and_sig_mid));
DCHECK_EQ(dexfile->GetMethodSignature(mid), other_dexfile.GetMethodSignature(name_and_sig_mid));
if (dexfile == &other_dexfile) {
return dex_method_idx;
}
const char* mid_declaring_class_descriptor = dexfile->StringByTypeIdx(mid.class_idx_);
- const DexFile::TypeId* other_type_id = other_dexfile.FindTypeId(mid_declaring_class_descriptor);
+ const dex::TypeId* other_type_id = other_dexfile.FindTypeId(mid_declaring_class_descriptor);
if (other_type_id != nullptr) {
- const DexFile::MethodId* other_mid = other_dexfile.FindMethodId(
+ const dex::MethodId* other_mid = other_dexfile.FindMethodId(
*other_type_id, other_dexfile.GetStringId(name_and_sig_mid.name_idx_),
other_dexfile.GetProtoId(name_and_sig_mid.proto_idx_));
if (other_mid != nullptr) {
@@ -447,11 +448,11 @@
// recreate the class_def_index from the descriptor.
std::string descriptor_storage;
- const DexFile::TypeId* declaring_class_type_id =
+ const dex::TypeId* declaring_class_type_id =
dex_file->FindTypeId(method->GetDeclaringClass()->GetDescriptor(&descriptor_storage));
CHECK(declaring_class_type_id != nullptr);
dex::TypeIndex declaring_class_type_index = dex_file->GetIndexForTypeId(*declaring_class_type_id);
- const DexFile::ClassDef* declaring_class_type_def =
+ const dex::ClassDef* declaring_class_type_def =
dex_file->FindClassDef(declaring_class_type_index);
CHECK(declaring_class_type_def != nullptr);
uint16_t declaring_class_def_index = dex_file->GetIndexForClassDef(*declaring_class_type_def);
@@ -522,7 +523,7 @@
auto* dex_file = dex_cache->GetDexFile();
const auto& method_id = dex_file->GetMethodId(GetDexMethodIndex());
const auto& proto_id = dex_file->GetMethodPrototype(method_id);
- const DexFile::TypeList* proto_params = dex_file->GetProtoParameters(proto_id);
+ const dex::TypeList* proto_params = dex_file->GetProtoParameters(proto_id);
auto count = proto_params != nullptr ? proto_params->Size() : 0u;
auto param_len = params != nullptr ? params->GetLength() : 0u;
if (param_len != count) {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index cc214f7..aed9f62 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -20,19 +20,20 @@
#include <cstddef>
#include <android-base/logging.h>
+#include <jni.h>
#include "base/array_ref.h"
#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/enums.h"
-#include "base/iteration_range.h"
#include "base/macros.h"
#include "base/runtime_debug.h"
#include "dex/code_item_accessors.h"
-#include "dex/dex_file.h"
+#include "dex/dex_file_structs.h"
#include "dex/dex_instruction_iterator.h"
#include "dex/modifiers.h"
#include "dex/primitive.h"
+#include "dex/signature.h"
#include "gc_root.h"
#include "obj_ptr.h"
#include "offsets.h"
@@ -40,6 +41,7 @@
namespace art {
+class DexFile;
template<class T> class Handle;
class ImtConflictTable;
enum InvokeType : uint32_t;
@@ -586,21 +588,21 @@
ObjPtr<mirror::String> ResolveNameString() REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::CodeItem* GetCodeItem() REQUIRES_SHARED(Locks::mutator_lock_);
+ const dex::CodeItem* GetCodeItem() REQUIRES_SHARED(Locks::mutator_lock_);
bool IsResolvedTypeIdx(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
int32_t GetLineNumFromDexPC(uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::ProtoId& GetPrototype() REQUIRES_SHARED(Locks::mutator_lock_);
+ const dex::ProtoId& GetPrototype() REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::TypeList* GetParameterTypeList() REQUIRES_SHARED(Locks::mutator_lock_);
+ const dex::TypeList* GetParameterTypeList() REQUIRES_SHARED(Locks::mutator_lock_);
const char* GetDeclaringClassSourceFile() REQUIRES_SHARED(Locks::mutator_lock_);
uint16_t GetClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::ClassDef& GetClassDef() REQUIRES_SHARED(Locks::mutator_lock_);
+ const dex::ClassDef& GetClassDef() REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE size_t GetNumberOfParameters() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/base/locks.cc b/runtime/base/locks.cc
index cfc9f1d..a7922a2 100644
--- a/runtime/base/locks.cc
+++ b/runtime/base/locks.cc
@@ -61,6 +61,7 @@
Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
+Mutex* Locks::runtime_thread_pool_lock_ = nullptr;
Mutex* Locks::cha_lock_ = nullptr;
Mutex* Locks::subtype_check_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
@@ -154,6 +155,7 @@
DCHECK(user_code_suspension_lock_ != nullptr);
DCHECK(dex_lock_ != nullptr);
DCHECK(native_debug_interface_lock_ != nullptr);
+ DCHECK(runtime_thread_pool_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
LockLevel current_lock_level = kInstrumentEntrypointsLock;
@@ -189,6 +191,10 @@
DCHECK(runtime_shutdown_lock_ == nullptr);
runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kRuntimeThreadPoolLock);
+ DCHECK(runtime_thread_pool_lock_ == nullptr);
+ runtime_thread_pool_lock_ = new Mutex("runtime thread pool lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
DCHECK(profiler_lock_ == nullptr);
profiler_lock_ = new Mutex("profiler lock", current_lock_level);
diff --git a/runtime/base/locks.h b/runtime/base/locks.h
index 8cbe372..57719f1 100644
--- a/runtime/base/locks.h
+++ b/runtime/base/locks.h
@@ -117,6 +117,7 @@
kJdwpEventListLock,
kJdwpAttachLock,
kJdwpStartLock,
+ kRuntimeThreadPoolLock,
kRuntimeShutdownLock,
kTraceLock,
kHeapBitmapLock,
@@ -224,8 +225,11 @@
// Guards shutdown of the runtime.
static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
+ // Runtime thread pool lock.
+ static Mutex* runtime_thread_pool_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+
// Guards background profiler global state.
- static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+ static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_thread_pool_lock_);
// Guards trace (ie traceview) requests.
static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 43f3ed3..978b1ab 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -23,6 +23,8 @@
#include "art_method-inl.h"
#include "base/mutex.h"
#include "class_linker.h"
+#include "dex/dex_file.h"
+#include "dex/dex_file_structs.h"
#include "gc_root-inl.h"
#include "handle_scope-inl.h"
#include "mirror/class_loader.h"
@@ -271,7 +273,7 @@
dex_cache,
type,
[this, dex_cache, method_idx, class_loader]() REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(method_idx);
+ const dex::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(method_idx);
ObjPtr<mirror::Class> klass =
LookupResolvedType(method_id.class_idx_, dex_cache, class_loader);
DCHECK(klass != nullptr);
@@ -286,7 +288,7 @@
ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx, pointer_size);
if (resolved == nullptr) {
const DexFile& dex_file = *dex_cache->GetDexFile();
- const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
+ const dex::MethodId& method_id = dex_file.GetMethodId(method_idx);
ObjPtr<mirror::Class> klass = LookupResolvedType(method_id.class_idx_, dex_cache, class_loader);
if (klass != nullptr) {
resolved = FindResolvedMethod(klass, dex_cache, class_loader, method_idx);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 3b92e2c..5d1f20c 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -61,6 +61,7 @@
#include "dex/dex_file-inl.h"
#include "dex/dex_file_exception_helpers.h"
#include "dex/dex_file_loader.h"
+#include "dex/signature-inl.h"
#include "dex/utf.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/runtime_asm_entrypoints.h"
@@ -2628,13 +2629,13 @@
return klass;
}
-using ClassPathEntry = std::pair<const DexFile*, const DexFile::ClassDef*>;
+using ClassPathEntry = std::pair<const DexFile*, const dex::ClassDef*>;
// Search a collection of DexFiles for a descriptor
ClassPathEntry FindInClassPath(const char* descriptor,
size_t hash, const std::vector<const DexFile*>& class_path) {
for (const DexFile* dex_file : class_path) {
- const DexFile::ClassDef* dex_class_def = OatDexFile::FindClassDef(*dex_file, descriptor, hash);
+ const dex::ClassDef* dex_class_def = OatDexFile::FindClassDef(*dex_file, descriptor, hash);
if (dex_class_def != nullptr) {
return ClassPathEntry(dex_file, dex_class_def);
}
@@ -2784,8 +2785,7 @@
ObjPtr<mirror::Class> ret;
auto define_class = [&](const DexFile* cp_dex_file) REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile::ClassDef* dex_class_def =
- OatDexFile::FindClassDef(*cp_dex_file, descriptor, hash);
+ const dex::ClassDef* dex_class_def = OatDexFile::FindClassDef(*cp_dex_file, descriptor, hash);
if (dex_class_def != nullptr) {
ObjPtr<mirror::Class> klass = DefineClass(soa.Self(),
descriptor,
@@ -2985,7 +2985,7 @@
size_t hash,
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
- const DexFile::ClassDef& dex_class_def) {
+ const dex::ClassDef& dex_class_def) {
StackHandleScope<3> hs(self);
auto klass = hs.NewHandle<mirror::Class>(nullptr);
@@ -3032,7 +3032,7 @@
// Get the real dex file. This will return the input if there aren't any callbacks or they do
// nothing.
DexFile const* new_dex_file = nullptr;
- DexFile::ClassDef const* new_class_def = nullptr;
+ dex::ClassDef const* new_class_def = nullptr;
// TODO We should ideally figure out some way to move this after we get a lock on the klass so it
// will only be called once.
Runtime::Current()->GetRuntimeCallbacks()->ClassPreDefine(descriptor,
@@ -3153,7 +3153,7 @@
}
uint32_t ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
- const DexFile::ClassDef& dex_class_def) {
+ const dex::ClassDef& dex_class_def) {
size_t num_ref = 0;
size_t num_8 = 0;
size_t num_16 = 0;
@@ -3171,7 +3171,7 @@
continue;
}
last_field_idx = field_idx;
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+ const dex::FieldId& field_id = dex_file.GetFieldId(field_idx);
const char* descriptor = dex_file.GetFieldTypeDescriptor(field_id);
char c = descriptor[0];
switch (c) {
@@ -3400,7 +3400,7 @@
}
void ClassLinker::SetupClass(const DexFile& dex_file,
- const DexFile::ClassDef& dex_class_def,
+ const dex::ClassDef& dex_class_def,
Handle<mirror::Class> klass,
ObjPtr<mirror::ClassLoader> class_loader) {
CHECK(klass != nullptr);
@@ -3481,7 +3481,7 @@
void ClassLinker::LoadClass(Thread* self,
const DexFile& dex_file,
- const DexFile::ClassDef& dex_class_def,
+ const dex::ClassDef& dex_class_def,
Handle<mirror::Class> klass) {
ClassAccessor accessor(dex_file,
dex_class_def,
@@ -3606,7 +3606,7 @@
Handle<mirror::Class> klass,
ArtMethod* dst) {
const uint32_t dex_method_idx = method.GetIndex();
- const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
+ const dex::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
const char* method_name = dex_file.StringDataByIdx(method_id.name_idx_);
ScopedAssertNoThreadSuspension ants("LoadMethod");
@@ -4987,7 +4987,7 @@
}
// Check if there are encoded static values needing initialization.
if (klass->NumStaticFields() != 0) {
- const DexFile::ClassDef* dex_class_def = klass->GetClassDef();
+ const dex::ClassDef* dex_class_def = klass->GetClassDef();
DCHECK(dex_class_def != nullptr);
if (dex_class_def->static_values_off_ != 0) {
return false;
@@ -5204,7 +5204,7 @@
const size_t num_static_fields = klass->NumStaticFields();
if (num_static_fields > 0) {
- const DexFile::ClassDef* dex_class_def = klass->GetClassDef();
+ const dex::ClassDef* dex_class_def = klass->GetClassDef();
CHECK(dex_class_def != nullptr);
StackHandleScope<3> hs(self);
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
@@ -5413,8 +5413,8 @@
DCHECK(Thread::Current()->IsExceptionPending());
DCHECK(!m->IsProxyMethod());
const DexFile* dex_file = m->GetDexFile();
- const DexFile::MethodId& method_id = dex_file->GetMethodId(m->GetDexMethodIndex());
- const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+ const dex::MethodId& method_id = dex_file->GetMethodId(m->GetDexMethodIndex());
+ const dex::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
dex::TypeIndex return_type_idx = proto_id.return_type_idx_;
std::string return_type = dex_file->PrettyType(return_type_idx);
std::string class_loader = mirror::Object::PrettyTypeOf(m->GetDeclaringClass()->GetClassLoader());
@@ -5492,8 +5492,8 @@
return false;
}
}
- const DexFile::TypeList* types1 = method1->GetParameterTypeList();
- const DexFile::TypeList* types2 = method2->GetParameterTypeList();
+ const dex::TypeList* types1 = method1->GetParameterTypeList();
+ const dex::TypeList* types2 = method2->GetParameterTypeList();
if (types1 == nullptr) {
if (types2 != nullptr && types2->Size() != 0) {
ThrowSignatureMismatch(klass, super_klass, method1,
@@ -5852,7 +5852,7 @@
bool ClassLinker::LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file) {
CHECK_EQ(ClassStatus::kIdx, klass->GetStatus());
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(klass->GetDexClassDefIndex());
+ const dex::ClassDef& class_def = dex_file.GetClassDef(klass->GetDexClassDefIndex());
dex::TypeIndex super_class_idx = class_def.superclass_idx_;
if (super_class_idx.IsValid()) {
// Check that a class does not inherit from itself directly.
@@ -5883,7 +5883,7 @@
CHECK(super_class->IsResolved());
klass->SetSuperClass(super_class);
}
- const DexFile::TypeList* interfaces = dex_file.GetInterfacesList(class_def);
+ const dex::TypeList* interfaces = dex_file.GetInterfacesList(class_def);
if (interfaces != nullptr) {
for (size_t i = 0; i < interfaces->Size(); i++) {
dex::TypeIndex idx = interfaces->GetTypeItem(i).type_idx_;
@@ -6027,7 +6027,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!other->IsProxyMethod()) << other->PrettyMethod();
const DexFile* other_dex_file = other->GetDexFile();
- const DexFile::MethodId& other_mid = other_dex_file->GetMethodId(other->GetDexMethodIndex());
+ const dex::MethodId& other_mid = other_dex_file->GetMethodId(other->GetDexMethodIndex());
if (dex_file_ == other_dex_file) {
return mid_->name_idx_ == other_mid.name_idx_ && mid_->proto_idx_ == other_mid.proto_idx_;
}
@@ -6045,7 +6045,7 @@
// Dex file for the method to compare against.
const DexFile* const dex_file_;
// MethodId for the method to compare against.
- const DexFile::MethodId* const mid_;
+ const dex::MethodId* const mid_;
// Lazily computed name from the dex file's strings.
const char* name_;
// Lazily computed name length.
@@ -8336,7 +8336,7 @@
return resolved;
}
const DexFile& dex_file = *dex_cache->GetDexFile();
- const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
+ const dex::MethodId& method_id = dex_file.GetMethodId(method_idx);
ObjPtr<mirror::Class> klass = nullptr;
if (valid_dex_cache_method) {
// We have a valid method from the DexCache but we need to perform ICCE and IAE checks.
@@ -8417,7 +8417,7 @@
return resolved;
}
// Fail, get the declaring class.
- const DexFile::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(method_idx);
+ const dex::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(method_idx);
ObjPtr<mirror::Class> klass = ResolveType(method_id.class_idx_, dex_cache, class_loader);
if (klass == nullptr) {
Thread::Current()->AssertPendingException();
@@ -8443,7 +8443,7 @@
ObjPtr<mirror::ClassLoader> class_loader,
bool is_static) {
const DexFile& dex_file = *dex_cache->GetDexFile();
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+ const dex::FieldId& field_id = dex_file.GetFieldId(field_idx);
ObjPtr<mirror::Class> klass = dex_cache->GetResolvedType(field_id.class_idx_);
if (klass == nullptr) {
klass = LookupResolvedType(field_id.class_idx_, dex_cache, class_loader);
@@ -8468,7 +8468,7 @@
return resolved;
}
const DexFile& dex_file = *dex_cache->GetDexFile();
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+ const dex::FieldId& field_id = dex_file.GetFieldId(field_idx);
ObjPtr<mirror::Class> klass = ResolveType(field_id.class_idx_, dex_cache, class_loader);
if (klass == nullptr) {
DCHECK(Thread::Current()->IsExceptionPending());
@@ -8494,7 +8494,7 @@
return resolved;
}
const DexFile& dex_file = *dex_cache->GetDexFile();
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+ const dex::FieldId& field_id = dex_file.GetFieldId(field_idx);
ObjPtr<mirror::Class> klass = ResolveType(field_id.class_idx_, dex_cache, class_loader);
if (klass == nullptr) {
DCHECK(Thread::Current()->IsExceptionPending());
@@ -8523,7 +8523,7 @@
: klass->FindInstanceField(dex_cache, field_idx);
if (resolved == nullptr) {
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+ const dex::FieldId& field_id = dex_file.GetFieldId(field_idx);
const char* name = dex_file.GetFieldName(field_id);
const char* type = dex_file.GetFieldTypeDescriptor(field_id);
resolved = is_static ? mirror::Class::FindStaticField(self, klass, name, type)
@@ -8551,7 +8551,7 @@
ArtField* resolved = nullptr;
Thread* self = Thread::Current();
const DexFile& dex_file = *dex_cache->GetDexFile();
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+ const dex::FieldId& field_id = dex_file.GetFieldId(field_idx);
const char* name = dex_file.GetFieldName(field_id);
const char* type = dex_file.GetFieldTypeDescriptor(field_id);
@@ -8588,7 +8588,7 @@
// First resolve the return type.
const DexFile& dex_file = *dex_cache->GetDexFile();
- const DexFile::ProtoId& proto_id = dex_file.GetProtoId(proto_idx);
+ const dex::ProtoId& proto_id = dex_file.GetProtoId(proto_idx);
Handle<mirror::Class> return_type(hs.NewHandle(
ResolveType(proto_id.return_type_idx_, dex_cache, class_loader)));
if (return_type == nullptr) {
@@ -8644,7 +8644,7 @@
mirror::MethodHandle* ClassLinker::ResolveMethodHandleForField(
Thread* self,
- const DexFile::MethodHandleItem& method_handle,
+ const dex::MethodHandleItem& method_handle,
ArtMethod* referrer) {
DexFile::MethodHandleType handle_type =
static_cast<DexFile::MethodHandleType>(method_handle.method_handle_type_);
@@ -8772,7 +8772,7 @@
mirror::MethodHandle* ClassLinker::ResolveMethodHandleForMethod(
Thread* self,
- const DexFile::MethodHandleItem& method_handle,
+ const dex::MethodHandleItem& method_handle,
ArtMethod* referrer) {
DexFile::MethodHandleType handle_type =
static_cast<DexFile::MethodHandleType>(method_handle.method_handle_type_);
@@ -8895,7 +8895,7 @@
}
const DexFile* dex_file = referrer->GetDexFile();
- const DexFile::MethodId& method_id = dex_file->GetMethodId(method_handle.field_or_method_idx_);
+ const dex::MethodId& method_id = dex_file->GetMethodId(method_handle.field_or_method_idx_);
int32_t index = 0;
if (receiver_count != 0) {
// Insert receiver. Use the class identified in the method handle rather than the declaring
@@ -8907,7 +8907,7 @@
method_params->Set(index++, receiver_class);
}
- const DexFile::ProtoId& proto_id = dex_file->GetProtoId(method_id.proto_idx_);
+ const dex::ProtoId& proto_id = dex_file->GetProtoId(method_id.proto_idx_);
DexFileParameterIterator it(*dex_file, proto_id);
while (it.HasNext()) {
DCHECK_LT(index, num_params);
@@ -8951,7 +8951,7 @@
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile* const dex_file = referrer->GetDexFile();
- const DexFile::MethodHandleItem& method_handle = dex_file->GetMethodHandle(method_handle_idx);
+ const dex::MethodHandleItem& method_handle = dex_file->GetMethodHandle(method_handle_idx);
switch (static_cast<DexFile::MethodHandleType>(method_handle.method_handle_type_)) {
case DexFile::MethodHandleType::kStaticPut:
case DexFile::MethodHandleType::kStaticGet:
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d0a7c9b..b9ac9ca 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -29,7 +29,6 @@
#include "base/macros.h"
#include "dex/class_accessor.h"
#include "dex/dex_cache_resolved_classes.h"
-#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
#include "gc_root.h"
#include "handle.h"
@@ -39,6 +38,11 @@
namespace art {
+namespace dex {
+struct ClassDef;
+struct MethodHandleItem;
+} // namespace dex
+
namespace gc {
namespace space {
class ImageSpace;
@@ -73,6 +77,7 @@
class ClassHierarchyAnalysis;
enum class ClassRoot : uint32_t;
class ClassTable;
+class DexFile;
template<class T> class Handle;
class ImtConflictTable;
template<typename T> class LengthPrefixedArray;
@@ -185,7 +190,7 @@
size_t hash,
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
- const DexFile::ClassDef& dex_class_def)
+ const dex::ClassDef& dex_class_def)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_);
@@ -844,19 +849,19 @@
// Precomputes size needed for Class, in the case of a non-temporary class this size must be
// sufficient to hold all static fields.
uint32_t SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
- const DexFile::ClassDef& dex_class_def);
+ const dex::ClassDef& dex_class_def);
// Setup the classloader, class def index, type idx so that we can insert this class in the class
// table.
void SetupClass(const DexFile& dex_file,
- const DexFile::ClassDef& dex_class_def,
+ const dex::ClassDef& dex_class_def,
Handle<mirror::Class> klass,
ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::mutator_lock_);
void LoadClass(Thread* self,
const DexFile& dex_file,
- const DexFile::ClassDef& dex_class_def,
+ const dex::ClassDef& dex_class_def,
Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1028,12 +1033,12 @@
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::MethodHandle* ResolveMethodHandleForField(Thread* self,
- const DexFile::MethodHandleItem& method_handle,
+ const dex::MethodHandleItem& method_handle,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::MethodHandle* ResolveMethodHandleForMethod(Thread* self,
- const DexFile::MethodHandleItem& method_handle,
+ const dex::MethodHandleItem& method_handle,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1403,9 +1408,9 @@
Handle<mirror::Class> klass ATTRIBUTE_UNUSED,
Handle<mirror::ClassLoader> class_loader ATTRIBUTE_UNUSED,
const DexFile& initial_dex_file ATTRIBUTE_UNUSED,
- const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
+ const dex::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
/*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
- /*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED)
+ /*out*/dex::ClassDef const** final_class_def ATTRIBUTE_UNUSED)
REQUIRES_SHARED(Locks::mutator_lock_) {}
// A class has been loaded.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 061c788..2f37123 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -28,6 +28,7 @@
#include "class_root.h"
#include "common_runtime_test.h"
#include "dex/dex_file_types.h"
+#include "dex/signature-inl.h"
#include "dex/standard_dex_file.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "experimental_flags.h"
@@ -429,13 +430,13 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
// Verify all the classes defined in this file
for (size_t i = 0; i < dex.NumClassDefs(); i++) {
- const DexFile::ClassDef& class_def = dex.GetClassDef(i);
+ const dex::ClassDef& class_def = dex.GetClassDef(i);
const char* descriptor = dex.GetClassDescriptor(class_def);
AssertDexFileClass(class_loader, descriptor);
}
// Verify all the types referenced by this file
for (size_t i = 0; i < dex.NumTypeIds(); i++) {
- const DexFile::TypeId& type_id = dex.GetTypeId(dex::TypeIndex(i));
+ const dex::TypeId& type_id = dex.GetTypeId(dex::TypeIndex(i));
const char* descriptor = dex.GetTypeDescriptor(type_id);
AssertDexFileClass(class_loader, descriptor);
}
@@ -997,7 +998,7 @@
Handle<mirror::DexCache> dex_cache = hs.NewHandle(all_fields_klass->GetDexCache());
const DexFile& dex_file = *dex_cache->GetDexFile();
// Get the index of the array class we want to test.
- const DexFile::TypeId* array_id = dex_file.FindTypeId("[Ljava/lang/Object;");
+ const dex::TypeId* array_id = dex_file.FindTypeId("[Ljava/lang/Object;");
ASSERT_TRUE(array_id != nullptr);
dex::TypeIndex array_idx = dex_file.GetIndexForTypeId(*array_id);
// Check that the array class wasn't resolved yet.
@@ -1323,7 +1324,7 @@
klass->FindClassMethod("getS0", "()Ljava/lang/Object;", kRuntimePointerSize);
ASSERT_TRUE(getS0 != nullptr);
ASSERT_TRUE(getS0->IsStatic());
- const DexFile::TypeId* type_id = dex_file->FindTypeId("LStaticsFromCode;");
+ const dex::TypeId* type_id = dex_file->FindTypeId("LStaticsFromCode;");
ASSERT_TRUE(type_id != nullptr);
dex::TypeIndex type_idx = dex_file->GetIndexForTypeId(*type_id);
ObjPtr<mirror::Class> uninit = ResolveVerifyAndClinit(type_idx,
@@ -1564,7 +1565,7 @@
Handle<mirror::DexCache> dex_cache = hs.NewHandle(
class_linker_->FindDexCache(soa.Self(), dex_file));
- const DexFile::MethodId& method1_id = dex_file.GetMethodId(method1->GetDexMethodIndex());
+ const dex::MethodId& method1_id = dex_file.GetMethodId(method1->GetDexMethodIndex());
// This is the MethodType corresponding to the prototype of
// String MethodTypes# method1(String).
@@ -1596,7 +1597,7 @@
kRuntimePointerSize);
ASSERT_TRUE(method2 != nullptr);
ASSERT_FALSE(method2->IsDirect());
- const DexFile::MethodId& method2_id = dex_file.GetMethodId(method2->GetDexMethodIndex());
+ const dex::MethodId& method2_id = dex_file.GetMethodId(method2->GetDexMethodIndex());
Handle<mirror::MethodType> method2_type = hs.NewHandle(
class_linker_->ResolveMethodType(soa.Self(), method2_id.proto_idx_, dex_cache, class_loader));
ASSERT_OBJ_PTR_NE(method1_type.Get(), method2_type.Get());
diff --git a/runtime/common_dex_operations.h b/runtime/common_dex_operations.h
index 5c5431d..1c95622 100644
--- a/runtime/common_dex_operations.h
+++ b/runtime/common_dex_operations.h
@@ -24,6 +24,7 @@
#include "base/macros.h"
#include "class_linker.h"
#include "dex/code_item_accessors.h"
+#include "dex/dex_file_structs.h"
#include "dex/primitive.h"
#include "handle_scope-inl.h"
#include "instrumentation.h"
@@ -42,7 +43,7 @@
namespace interpreter {
void ArtInterpreterToInterpreterBridge(Thread* self,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
ShadowFrame* shadow_frame,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 80140b3..adf01c3 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -3927,7 +3927,7 @@
StackHandleScope<2> hs(soa.Self());
HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
- const DexFile::TypeList* types = m->GetParameterTypeList();
+ const dex::TypeList* types = m->GetParameterTypeList();
for (size_t i = 0; i < arg_count; ++i) {
if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
return JDWP::ERR_ILLEGAL_ARGUMENT;
diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc
index 9127a27..e75baf8 100644
--- a/runtime/dex/dex_file_annotations.cc
+++ b/runtime/dex/dex_file_annotations.cc
@@ -44,6 +44,15 @@
using android::base::StringPrintf;
+using dex::AnnotationItem;
+using dex::AnnotationSetItem;
+using dex::AnnotationSetRefItem;
+using dex::AnnotationSetRefList;
+using dex::AnnotationsDirectoryItem;
+using dex::FieldAnnotationsItem;
+using dex::MethodAnnotationsItem;
+using dex::ParameterAnnotationsItem;
+
struct DexFile::AnnotationValue {
JValue value_;
uint8_t type_;
@@ -75,7 +84,7 @@
return dex_file_;
}
- const DexFile::ClassDef* GetClassDef() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ const dex::ClassDef* GetClassDef() const REQUIRES_SHARED(Locks::mutator_lock_) {
return class_def_;
}
@@ -107,7 +116,7 @@
ClassData(Handle<mirror::Class> klass,
ArtMethod* method,
const DexFile& dex_file,
- const DexFile::ClassDef* class_def) REQUIRES_SHARED(Locks::mutator_lock_)
+ const dex::ClassDef* class_def) REQUIRES_SHARED(Locks::mutator_lock_)
: real_klass_(klass),
method_(method),
dex_file_(dex_file),
@@ -118,7 +127,7 @@
Handle<mirror::Class> real_klass_;
ArtMethod* method_;
const DexFile& dex_file_;
- const DexFile::ClassDef* class_def_;
+ const dex::ClassDef* class_def_;
DISALLOW_COPY_AND_ASSIGN(ClassData);
};
@@ -137,21 +146,20 @@
return actual == expected;
}
-const DexFile::AnnotationSetItem* FindAnnotationSetForField(ArtField* field)
+const AnnotationSetItem* FindAnnotationSetForField(ArtField* field)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile* dex_file = field->GetDexFile();
ObjPtr<mirror::Class> klass = field->GetDeclaringClass();
- const DexFile::ClassDef* class_def = klass->GetClassDef();
+ const dex::ClassDef* class_def = klass->GetClassDef();
if (class_def == nullptr) {
DCHECK(klass->IsProxyClass());
return nullptr;
}
- const DexFile::AnnotationsDirectoryItem* annotations_dir =
- dex_file->GetAnnotationsDirectory(*class_def);
+ const AnnotationsDirectoryItem* annotations_dir = dex_file->GetAnnotationsDirectory(*class_def);
if (annotations_dir == nullptr) {
return nullptr;
}
- const DexFile::FieldAnnotationsItem* field_annotations =
+ const FieldAnnotationsItem* field_annotations =
dex_file->GetFieldAnnotations(annotations_dir);
if (field_annotations == nullptr) {
return nullptr;
@@ -166,14 +174,14 @@
return nullptr;
}
-const DexFile::AnnotationItem* SearchAnnotationSet(const DexFile& dex_file,
- const DexFile::AnnotationSetItem* annotation_set,
- const char* descriptor,
- uint32_t visibility)
+const AnnotationItem* SearchAnnotationSet(const DexFile& dex_file,
+ const AnnotationSetItem* annotation_set,
+ const char* descriptor,
+ uint32_t visibility)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile::AnnotationItem* result = nullptr;
+ const AnnotationItem* result = nullptr;
for (uint32_t i = 0; i < annotation_set->size_; ++i) {
- const DexFile::AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i);
+ const AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i);
if (!IsVisibilityCompatible(annotation_item->visibility_, visibility)) {
continue;
}
@@ -268,16 +276,14 @@
return nullptr;
}
-const DexFile::AnnotationSetItem* FindAnnotationSetForMethod(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
- uint32_t method_index) {
- const DexFile::AnnotationsDirectoryItem* annotations_dir =
- dex_file.GetAnnotationsDirectory(class_def);
+const AnnotationSetItem* FindAnnotationSetForMethod(const DexFile& dex_file,
+ const dex::ClassDef& class_def,
+ uint32_t method_index) {
+ const AnnotationsDirectoryItem* annotations_dir = dex_file.GetAnnotationsDirectory(class_def);
if (annotations_dir == nullptr) {
return nullptr;
}
- const DexFile::MethodAnnotationsItem* method_annotations =
- dex_file.GetMethodAnnotations(annotations_dir);
+ const MethodAnnotationsItem* method_annotations = dex_file.GetMethodAnnotations(annotations_dir);
if (method_annotations == nullptr) {
return nullptr;
}
@@ -290,7 +296,7 @@
return nullptr;
}
-inline const DexFile::AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method)
+inline const AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsProxyMethod()) {
return nullptr;
@@ -300,15 +306,15 @@
method->GetDexMethodIndex());
}
-const DexFile::ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod* method)
+const ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile* dex_file = method->GetDexFile();
- const DexFile::AnnotationsDirectoryItem* annotations_dir =
+ const AnnotationsDirectoryItem* annotations_dir =
dex_file->GetAnnotationsDirectory(method->GetClassDef());
if (annotations_dir == nullptr) {
return nullptr;
}
- const DexFile::ParameterAnnotationsItem* parameter_annotations =
+ const ParameterAnnotationsItem* parameter_annotations =
dex_file->GetParameterAnnotations(annotations_dir);
if (parameter_annotations == nullptr) {
return nullptr;
@@ -323,16 +329,15 @@
return nullptr;
}
-const DexFile::AnnotationSetItem* FindAnnotationSetForClass(const ClassData& klass)
+const AnnotationSetItem* FindAnnotationSetForClass(const ClassData& klass)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass.GetDexFile();
- const DexFile::ClassDef* class_def = klass.GetClassDef();
+ const dex::ClassDef* class_def = klass.GetClassDef();
if (class_def == nullptr) {
DCHECK(klass.GetRealClass()->IsProxyClass());
return nullptr;
}
- const DexFile::AnnotationsDirectoryItem* annotations_dir =
- dex_file.GetAnnotationsDirectory(*class_def);
+ const AnnotationsDirectoryItem* annotations_dir = dex_file.GetAnnotationsDirectory(*class_def);
if (annotations_dir == nullptr) {
return nullptr;
}
@@ -768,15 +773,14 @@
return new_member.Get();
}
-const DexFile::AnnotationItem* GetAnnotationItemFromAnnotationSet(
- const ClassData& klass,
- const DexFile::AnnotationSetItem* annotation_set,
- uint32_t visibility,
- Handle<mirror::Class> annotation_class)
+const AnnotationItem* GetAnnotationItemFromAnnotationSet(const ClassData& klass,
+ const AnnotationSetItem* annotation_set,
+ uint32_t visibility,
+ Handle<mirror::Class> annotation_class)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass.GetDexFile();
for (uint32_t i = 0; i < annotation_set->size_; ++i) {
- const DexFile::AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i);
+ const AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i);
if (!IsVisibilityCompatible(annotation_item->visibility_, visibility)) {
continue;
}
@@ -805,13 +809,12 @@
return nullptr;
}
-ObjPtr<mirror::Object> GetAnnotationObjectFromAnnotationSet(
- const ClassData& klass,
- const DexFile::AnnotationSetItem* annotation_set,
- uint32_t visibility,
- Handle<mirror::Class> annotation_class)
+ObjPtr<mirror::Object> GetAnnotationObjectFromAnnotationSet(const ClassData& klass,
+ const AnnotationSetItem* annotation_set,
+ uint32_t visibility,
+ Handle<mirror::Class> annotation_class)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile::AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
+ const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
klass, annotation_set, visibility, annotation_class);
if (annotation_item == nullptr) {
return nullptr;
@@ -821,7 +824,7 @@
}
ObjPtr<mirror::Object> GetAnnotationValue(const ClassData& klass,
- const DexFile::AnnotationItem* annotation_item,
+ const AnnotationItem* annotation_item,
const char* annotation_name,
Handle<mirror::Class> array_class,
uint32_t expected_type)
@@ -855,11 +858,11 @@
static ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureValue(
const ClassData& klass,
- const DexFile::AnnotationSetItem* annotation_set)
+ const AnnotationSetItem* annotation_set)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass.GetDexFile();
StackHandleScope<1> hs(Thread::Current());
- const DexFile::AnnotationItem* annotation_item =
+ const AnnotationItem* annotation_item =
SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/Signature;",
DexFile::kDexVisibilitySystem);
if (annotation_item == nullptr) {
@@ -877,12 +880,11 @@
return obj->AsObjectArray<mirror::String>();
}
-ObjPtr<mirror::ObjectArray<mirror::Class>> GetThrowsValue(
- const ClassData& klass,
- const DexFile::AnnotationSetItem* annotation_set)
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetThrowsValue(const ClassData& klass,
+ const AnnotationSetItem* annotation_set)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass.GetDexFile();
- const DexFile::AnnotationItem* annotation_item =
+ const AnnotationItem* annotation_item =
SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/Throws;",
DexFile::kDexVisibilitySystem);
if (annotation_item == nullptr) {
@@ -903,7 +905,7 @@
ObjPtr<mirror::ObjectArray<mirror::Object>> ProcessAnnotationSet(
const ClassData& klass,
- const DexFile::AnnotationSetItem* annotation_set,
+ const AnnotationSetItem* annotation_set,
uint32_t visibility)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass.GetDexFile();
@@ -925,7 +927,7 @@
uint32_t dest_index = 0;
for (uint32_t i = 0; i < size; ++i) {
- const DexFile::AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i);
+ const AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i);
// Note that we do not use IsVisibilityCompatible here because older code
// was correct for this case.
if (annotation_item->visibility_ != visibility) {
@@ -961,7 +963,7 @@
ObjPtr<mirror::ObjectArray<mirror::Object>> ProcessAnnotationSetRefList(
const ClassData& klass,
- const DexFile::AnnotationSetRefList* set_ref_list,
+ const AnnotationSetRefList* set_ref_list,
uint32_t size)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass.GetDexFile();
@@ -982,8 +984,8 @@
return nullptr;
}
for (uint32_t index = 0; index < size; ++index) {
- const DexFile::AnnotationSetRefItem* set_ref_item = &set_ref_list->list_[index];
- const DexFile::AnnotationSetItem* set_item = dex_file.GetSetRefItemItem(set_ref_item);
+ const AnnotationSetRefItem* set_ref_item = &set_ref_list->list_[index];
+ const AnnotationSetItem* set_item = dex_file.GetSetRefItemItem(set_ref_item);
ObjPtr<mirror::Object> annotation_set = ProcessAnnotationSet(klass,
set_item,
DexFile::kDexVisibilityRuntime);
@@ -1000,7 +1002,7 @@
ObjPtr<mirror::Object> GetAnnotationForField(ArtField* field,
Handle<mirror::Class> annotation_class) {
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
if (annotation_set == nullptr) {
return nullptr;
}
@@ -1013,14 +1015,14 @@
}
ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForField(ArtField* field) {
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
StackHandleScope<1> hs(Thread::Current());
const ClassData field_class(hs, field);
return ProcessAnnotationSet(field_class, annotation_set, DexFile::kDexVisibilityRuntime);
}
ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureAnnotationForField(ArtField* field) {
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
if (annotation_set == nullptr) {
return nullptr;
}
@@ -1030,13 +1032,13 @@
}
bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_class) {
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
if (annotation_set == nullptr) {
return false;
}
StackHandleScope<1> hs(Thread::Current());
const ClassData field_class(hs, field);
- const DexFile::AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
+ const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
field_class, annotation_set, DexFile::kDexVisibilityRuntime, annotation_class);
return annotation_item != nullptr;
}
@@ -1044,17 +1046,17 @@
ObjPtr<mirror::Object> GetAnnotationDefaultValue(ArtMethod* method) {
const ClassData klass(method);
const DexFile* dex_file = &klass.GetDexFile();
- const DexFile::AnnotationsDirectoryItem* annotations_dir =
+ const AnnotationsDirectoryItem* annotations_dir =
dex_file->GetAnnotationsDirectory(*klass.GetClassDef());
if (annotations_dir == nullptr) {
return nullptr;
}
- const DexFile::AnnotationSetItem* annotation_set =
+ const AnnotationSetItem* annotation_set =
dex_file->GetClassAnnotationSet(annotations_dir);
if (annotation_set == nullptr) {
return nullptr;
}
- const DexFile::AnnotationItem* annotation_item = SearchAnnotationSet(*dex_file, annotation_set,
+ const AnnotationItem* annotation_item = SearchAnnotationSet(*dex_file, annotation_set,
"Ldalvik/annotation/AnnotationDefault;", DexFile::kDexVisibilitySystem);
if (annotation_item == nullptr) {
return nullptr;
@@ -1087,7 +1089,7 @@
ObjPtr<mirror::Object> GetAnnotationForMethod(ArtMethod* method,
Handle<mirror::Class> annotation_class) {
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
if (annotation_set == nullptr) {
return nullptr;
}
@@ -1096,14 +1098,14 @@
}
ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForMethod(ArtMethod* method) {
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
return ProcessAnnotationSet(ClassData(method),
annotation_set,
DexFile::kDexVisibilityRuntime);
}
ObjPtr<mirror::ObjectArray<mirror::Class>> GetExceptionTypesForMethod(ArtMethod* method) {
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
if (annotation_set == nullptr) {
return nullptr;
}
@@ -1112,12 +1114,12 @@
ObjPtr<mirror::ObjectArray<mirror::Object>> GetParameterAnnotations(ArtMethod* method) {
const DexFile* dex_file = method->GetDexFile();
- const DexFile::ParameterAnnotationsItem* parameter_annotations =
+ const ParameterAnnotationsItem* parameter_annotations =
FindAnnotationsItemForMethod(method);
if (parameter_annotations == nullptr) {
return nullptr;
}
- const DexFile::AnnotationSetRefList* set_ref_list =
+ const AnnotationSetRefList* set_ref_list =
dex_file->GetParameterAnnotationSetRefList(parameter_annotations);
if (set_ref_list == nullptr) {
return nullptr;
@@ -1128,12 +1130,12 @@
uint32_t GetNumberOfAnnotatedMethodParameters(ArtMethod* method) {
const DexFile* dex_file = method->GetDexFile();
- const DexFile::ParameterAnnotationsItem* parameter_annotations =
+ const ParameterAnnotationsItem* parameter_annotations =
FindAnnotationsItemForMethod(method);
if (parameter_annotations == nullptr) {
return 0u;
}
- const DexFile::AnnotationSetRefList* set_ref_list =
+ const AnnotationSetRefList* set_ref_list =
dex_file->GetParameterAnnotationSetRefList(parameter_annotations);
if (set_ref_list == nullptr) {
return 0u;
@@ -1145,12 +1147,11 @@
uint32_t parameter_idx,
Handle<mirror::Class> annotation_class) {
const DexFile* dex_file = method->GetDexFile();
- const DexFile::ParameterAnnotationsItem* parameter_annotations =
- FindAnnotationsItemForMethod(method);
+ const ParameterAnnotationsItem* parameter_annotations = FindAnnotationsItemForMethod(method);
if (parameter_annotations == nullptr) {
return nullptr;
}
- const DexFile::AnnotationSetRefList* set_ref_list =
+ const AnnotationSetRefList* set_ref_list =
dex_file->GetParameterAnnotationSetRefList(parameter_annotations);
if (set_ref_list == nullptr) {
return nullptr;
@@ -1158,8 +1159,8 @@
if (parameter_idx >= set_ref_list->size_) {
return nullptr;
}
- const DexFile::AnnotationSetRefItem* annotation_set_ref = &set_ref_list->list_[parameter_idx];
- const DexFile::AnnotationSetItem* annotation_set =
+ const AnnotationSetRefItem* annotation_set_ref = &set_ref_list->list_[parameter_idx];
+ const AnnotationSetItem* annotation_set =
dex_file->GetSetRefItemItem(annotation_set_ref);
if (annotation_set == nullptr) {
return nullptr;
@@ -1174,14 +1175,14 @@
ArtMethod* method,
/*out*/ MutableHandle<mirror::ObjectArray<mirror::String>>* names,
/*out*/ MutableHandle<mirror::IntArray>* access_flags) {
- const DexFile::AnnotationSetItem* annotation_set =
+ const AnnotationSetItem* annotation_set =
FindAnnotationSetForMethod(method);
if (annotation_set == nullptr) {
return false;
}
const DexFile* dex_file = method->GetDexFile();
- const DexFile::AnnotationItem* annotation_item =
+ const AnnotationItem* annotation_item =
SearchAnnotationSet(*dex_file,
annotation_set,
"Ldalvik/annotation/MethodParameters;",
@@ -1228,7 +1229,7 @@
}
ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureAnnotationForMethod(ArtMethod* method) {
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
if (annotation_set == nullptr) {
return nullptr;
}
@@ -1238,11 +1239,11 @@
bool IsMethodAnnotationPresent(ArtMethod* method,
Handle<mirror::Class> annotation_class,
uint32_t visibility /* = DexFile::kDexVisibilityRuntime */) {
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
if (annotation_set == nullptr) {
return false;
}
- const DexFile::AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
+ const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
ClassData(method), annotation_set, visibility, annotation_class);
return annotation_item != nullptr;
}
@@ -1263,11 +1264,11 @@
// Check whether a method from the `dex_file` with the given `annotation_set`
// is annotated with `annotation_descriptor` with build visibility.
static bool IsMethodBuildAnnotationPresent(const DexFile& dex_file,
- const DexFile::AnnotationSetItem& annotation_set,
+ const AnnotationSetItem& annotation_set,
const char* annotation_descriptor,
jclass annotation_class) {
for (uint32_t i = 0; i < annotation_set.size_; ++i) {
- const DexFile::AnnotationItem* annotation_item = dex_file.GetAnnotationItem(&annotation_set, i);
+ const AnnotationItem* annotation_item = dex_file.GetAnnotationItem(&annotation_set, i);
if (!IsVisibilityCompatible(annotation_item->visibility_, DexFile::kDexVisibilityBuild)) {
continue;
}
@@ -1283,9 +1284,9 @@
}
uint32_t GetNativeMethodAnnotationAccessFlags(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
+ const dex::ClassDef& class_def,
uint32_t method_index) {
- const DexFile::AnnotationSetItem* annotation_set =
+ const dex::AnnotationSetItem* annotation_set =
FindAnnotationSetForMethod(dex_file, class_def, method_index);
if (annotation_set == nullptr) {
return 0u;
@@ -1312,7 +1313,7 @@
ObjPtr<mirror::Object> GetAnnotationForClass(Handle<mirror::Class> klass,
Handle<mirror::Class> annotation_class) {
ClassData data(klass);
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return nullptr;
}
@@ -1324,17 +1325,17 @@
ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForClass(Handle<mirror::Class> klass) {
ClassData data(klass);
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
return ProcessAnnotationSet(data, annotation_set, DexFile::kDexVisibilityRuntime);
}
ObjPtr<mirror::ObjectArray<mirror::Class>> GetDeclaredClasses(Handle<mirror::Class> klass) {
ClassData data(klass);
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return nullptr;
}
- const DexFile::AnnotationItem* annotation_item =
+ const AnnotationItem* annotation_item =
SearchAnnotationSet(data.GetDexFile(), annotation_set, "Ldalvik/annotation/MemberClasses;",
DexFile::kDexVisibilitySystem);
if (annotation_item == nullptr) {
@@ -1355,11 +1356,11 @@
ObjPtr<mirror::Class> GetDeclaringClass(Handle<mirror::Class> klass) {
ClassData data(klass);
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return nullptr;
}
- const DexFile::AnnotationItem* annotation_item =
+ const AnnotationItem* annotation_item =
SearchAnnotationSet(data.GetDexFile(), annotation_set, "Ldalvik/annotation/EnclosingClass;",
DexFile::kDexVisibilitySystem);
if (annotation_item == nullptr) {
@@ -1382,11 +1383,11 @@
return declaring_class;
}
ClassData data(klass);
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return nullptr;
}
- const DexFile::AnnotationItem* annotation_item =
+ const AnnotationItem* annotation_item =
SearchAnnotationSet(data.GetDexFile(),
annotation_set,
"Ldalvik/annotation/EnclosingMethod;",
@@ -1423,11 +1424,11 @@
ObjPtr<mirror::Object> GetEnclosingMethod(Handle<mirror::Class> klass) {
ClassData data(klass);
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return nullptr;
}
- const DexFile::AnnotationItem* annotation_item =
+ const AnnotationItem* annotation_item =
SearchAnnotationSet(data.GetDexFile(),
annotation_set,
"Ldalvik/annotation/EnclosingMethod;",
@@ -1441,11 +1442,11 @@
bool GetInnerClass(Handle<mirror::Class> klass, /*out*/ ObjPtr<mirror::String>* name) {
ClassData data(klass);
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return false;
}
- const DexFile::AnnotationItem* annotation_item = SearchAnnotationSet(
+ const AnnotationItem* annotation_item = SearchAnnotationSet(
data.GetDexFile(),
annotation_set,
"Ldalvik/annotation/InnerClass;",
@@ -1476,11 +1477,11 @@
bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) {
ClassData data(klass);
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return false;
}
- const DexFile::AnnotationItem* annotation_item =
+ const AnnotationItem* annotation_item =
SearchAnnotationSet(data.GetDexFile(), annotation_set, "Ldalvik/annotation/InnerClass;",
DexFile::kDexVisibilitySystem);
if (annotation_item == nullptr) {
@@ -1509,7 +1510,7 @@
ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureAnnotationForClass(
Handle<mirror::Class> klass) {
ClassData data(klass);
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return nullptr;
}
@@ -1526,12 +1527,12 @@
}
ClassData data(klass);
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return nullptr;
}
- const DexFile::AnnotationItem* annotation_item = SearchAnnotationSet(
+ const AnnotationItem* annotation_item = SearchAnnotationSet(
data.GetDexFile(),
annotation_set,
"Ldalvik/annotation/SourceDebugExtension;",
@@ -1562,11 +1563,11 @@
bool IsClassAnnotationPresent(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class) {
ClassData data(klass);
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return false;
}
- const DexFile::AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
+ const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
data, annotation_set, DexFile::kDexVisibilityRuntime, annotation_class);
return annotation_item != nullptr;
}
diff --git a/runtime/dex/dex_file_annotations.h b/runtime/dex/dex_file_annotations.h
index bde7891..3625cee 100644
--- a/runtime/dex/dex_file_annotations.h
+++ b/runtime/dex/dex_file_annotations.h
@@ -18,7 +18,6 @@
#define ART_RUNTIME_DEX_DEX_FILE_ANNOTATIONS_H_
#include "dex/dex_file.h"
-
#include "handle.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array.h"
@@ -84,7 +83,7 @@
// @dalvik.annotation.optimization.CriticalNative with build visibility.
// If yes, return the associated access flags, i.e. kAccFastNative or kAccCriticalNative.
uint32_t GetNativeMethodAnnotationAccessFlags(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
+ const dex::ClassDef& class_def,
uint32_t method_index);
// Class annotations.
@@ -124,7 +123,7 @@
RuntimeEncodedStaticFieldValueIterator(Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
ClassLinker* linker,
- const DexFile::ClassDef& class_def)
+ const dex::ClassDef& class_def)
REQUIRES_SHARED(Locks::mutator_lock_)
: EncodedStaticFieldValueIterator(*dex_cache->GetDexFile(), class_def),
dex_cache_(dex_cache),
diff --git a/runtime/dex_to_dex_decompiler.cc b/runtime/dex_to_dex_decompiler.cc
index aff9b47..d078d6f 100644
--- a/runtime/dex_to_dex_decompiler.cc
+++ b/runtime/dex_to_dex_decompiler.cc
@@ -32,7 +32,7 @@
class DexDecompiler {
public:
DexDecompiler(const DexFile& dex_file,
- const DexFile::CodeItem& code_item,
+ const dex::CodeItem& code_item,
const ArrayRef<const uint8_t>& quickened_info,
bool decompile_return_instruction)
: code_item_accessor_(dex_file, &code_item),
@@ -194,7 +194,7 @@
}
bool ArtDecompileDEX(const DexFile& dex_file,
- const DexFile::CodeItem& code_item,
+ const dex::CodeItem& code_item,
const ArrayRef<const uint8_t>& quickened_info,
bool decompile_return_instruction) {
if (quickened_info.size() == 0 && !decompile_return_instruction) {
diff --git a/runtime/dex_to_dex_decompiler.h b/runtime/dex_to_dex_decompiler.h
index 93711d1..4b6b0f7 100644
--- a/runtime/dex_to_dex_decompiler.h
+++ b/runtime/dex_to_dex_decompiler.h
@@ -18,9 +18,15 @@
#define ART_RUNTIME_DEX_TO_DEX_DECOMPILER_H_
#include "base/array_ref.h"
-#include "dex/dex_file.h"
namespace art {
+
+class DexFile;
+
+namespace dex {
+struct CodeItem;
+} // namespace dex
+
namespace optimizer {
// "Decompile", that is unquicken, the code item provided, given the
@@ -30,7 +36,7 @@
// consistent with DexToDexCompiler, but we should really change it to
// DexFile::CodeItem*.
bool ArtDecompileDEX(const DexFile& dex_file,
- const DexFile::CodeItem& code_item,
+ const dex::CodeItem& code_item,
const ArrayRef<const uint8_t>& quickened_data,
bool decompile_return_instruction);
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 2ef3d92..b8ad624 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -244,6 +244,7 @@
<< GetName() << " throughput: " << freed_objects / seconds << "/s / "
<< PrettySize(freed_bytes / seconds) << "/s"
<< " per cpu-time: "
+ << static_cast<uint64_t>(freed_bytes / cpu_seconds) << "/s / "
<< PrettySize(freed_bytes / cpu_seconds) << "/s\n";
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bf8aaae..8d19cd0 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1416,6 +1416,11 @@
TrimSpaces(self);
// Trim arenas that may have been used by JIT or verifier.
runtime->GetArenaPool()->TrimMaps();
+ {
+ // TODO: Move this to a callback called when startup is finished (b/120671223).
+ ScopedTrace trace2("Delete thread pool");
+ runtime->DeleteThreadPool();
+ }
}
class TrimIndirectReferenceTableClosure : public Closure {
@@ -3526,7 +3531,7 @@
running_collection_is_blocking_ = true;
VLOG(gc) << "Waiting for a blocking GC " << cause;
}
- ScopedTrace trace("GC: Wait For Completion");
+ SCOPED_TRACE << "GC: Wait For Completion " << cause;
// We must wait, change thread state then sleep on gc_complete_cond_;
gc_complete_cond_->Wait(self);
last_gc_type = last_gc_type_;
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 66db063..64fd3cd 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -21,7 +21,6 @@
#include <unistd.h>
#include <random>
-#include <thread>
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
@@ -685,40 +684,12 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
- std::unique_ptr<ThreadPool> thread_pool;
std::unique_ptr<ImageSpace> space = Init(image_filename,
image_location,
oat_file,
&logger,
- &thread_pool,
image_reservation,
error_msg);
- if (thread_pool != nullptr) {
- // Delay the thread pool deletion to prevent the deletion slowing down the startup by causing
- // preemption. TODO: Just do this in heap trim.
- static constexpr uint64_t kThreadPoolDeleteDelay = MsToNs(5000);
-
- class DeleteThreadPoolTask : public HeapTask {
- public:
- explicit DeleteThreadPoolTask(std::unique_ptr<ThreadPool>&& thread_pool)
- : HeapTask(NanoTime() + kThreadPoolDeleteDelay), thread_pool_(std::move(thread_pool)) {}
-
- void Run(Thread* self) override {
- ScopedTrace trace("DestroyThreadPool");
- ScopedThreadStateChange stsc(self, kNative);
- thread_pool_.reset();
- }
-
- private:
- std::unique_ptr<ThreadPool> thread_pool_;
- };
- gc::TaskProcessor* const processor = Runtime::Current()->GetHeap()->GetTaskProcessor();
- // The thread pool is already done being used since Init has finished running. Deleting the
- // thread pool is done async since it takes a non-trivial amount of time to do.
- if (processor != nullptr) {
- processor->AddTask(Thread::Current(), new DeleteThreadPoolTask(std::move(thread_pool)));
- }
- }
if (space != nullptr) {
uint32_t expected_reservation_size =
RoundUp(space->GetImageHeader().GetImageSize(), kPageSize);
@@ -779,7 +750,6 @@
const char* image_location,
const OatFile* oat_file,
TimingLogger* logger,
- std::unique_ptr<ThreadPool>* thread_pool,
/*inout*/MemMap* image_reservation,
/*out*/std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -856,18 +826,6 @@
return nullptr;
}
- const size_t kMinBlocks = 2;
- if (thread_pool != nullptr && image_header->GetBlockCount() >= kMinBlocks) {
- TimingLogger::ScopedTiming timing("CreateThreadPool", logger);
- ScopedThreadStateChange stsc(Thread::Current(), kNative);
- constexpr size_t kStackSize = 64 * KB;
- constexpr size_t kMaxRuntimeWorkers = 4u;
- const size_t num_workers =
- std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers);
- thread_pool->reset(new ThreadPool("Image", num_workers, /*create_peers=*/false, kStackSize));
- thread_pool->get()->StartWorkers(Thread::Current());
- }
-
// GetImageBegin is the preferred address to map the image. If we manage to map the
// image at the image begin, the amount of fixup work required is minimized.
// If it is pic we will retry with error_msg for the failure case. Pass a null error_msg to
@@ -880,7 +838,6 @@
*image_header,
file->Fd(),
logger,
- thread_pool != nullptr ? thread_pool->get() : nullptr,
image_reservation,
error_msg);
if (!map.IsValid()) {
@@ -971,7 +928,6 @@
const ImageHeader& image_header,
int fd,
TimingLogger* logger,
- ThreadPool* pool,
/*inout*/MemMap* image_reservation,
/*out*/std::string* error_msg) {
TimingLogger::ScopedTiming timing("MapImageFile", logger);
@@ -1015,9 +971,12 @@
}
memcpy(map.Begin(), &image_header, sizeof(ImageHeader));
+ Runtime::ScopedThreadPoolUsage stpu;
+ ThreadPool* const pool = stpu.GetThreadPool();
const uint64_t start = NanoTime();
Thread* const self = Thread::Current();
- const bool use_parallel = pool != nullptr;
+ static constexpr size_t kMinBlocks = 2u;
+ const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks;
for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) {
auto function = [&](Thread*) {
const uint64_t start2 = NanoTime();
@@ -1062,32 +1021,30 @@
app_oat_(app_oat) {}
// Return the relocated address of a heap object.
+ // Null checks must be performed in the caller (for performance reasons).
template <typename T>
ALWAYS_INLINE T* ForwardObject(T* src) const {
+ DCHECK(src != nullptr);
const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
if (boot_image_.InSource(uint_src)) {
return reinterpret_cast<T*>(boot_image_.ToDest(uint_src));
}
- if (app_image_.InSource(uint_src)) {
- return reinterpret_cast<T*>(app_image_.ToDest(uint_src));
- }
// Since we are fixing up the app image, there should only be pointers to the app image and
// boot image.
- DCHECK(src == nullptr) << reinterpret_cast<const void*>(src);
- return src;
+ DCHECK(app_image_.InSource(uint_src)) << reinterpret_cast<const void*>(src);
+ return reinterpret_cast<T*>(app_image_.ToDest(uint_src));
}
// Return the relocated address of a code pointer (contained by an oat file).
+ // Null checks must be performed in the caller (for performance reasons).
ALWAYS_INLINE const void* ForwardCode(const void* src) const {
+ DCHECK(src != nullptr);
const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
if (boot_image_.InSource(uint_src)) {
return reinterpret_cast<const void*>(boot_image_.ToDest(uint_src));
}
- if (app_oat_.InSource(uint_src)) {
- return reinterpret_cast<const void*>(app_oat_.ToDest(uint_src));
- }
- DCHECK(src == nullptr) << src;
- return src;
+ DCHECK(app_oat_.InSource(uint_src)) << src;
+ return reinterpret_cast<const void*>(app_oat_.ToDest(uint_src));
}
// Must be called on pointers that already have been relocated to the destination relocation.
@@ -1157,9 +1114,12 @@
// Space is not yet added to the heap, don't do a read barrier.
mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
offset);
- // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
- // image.
- obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, ForwardObject(ref));
+ if (ref != nullptr) {
+ // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
+ // image.
+ obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
+ offset, ForwardObject(ref));
+ }
}
// java.lang.ref.Reference visitor.
@@ -1167,9 +1127,11 @@
ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
mirror::Object* obj = ref->GetReferent<kWithoutReadBarrier>();
- ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
- mirror::Reference::ReferentOffset(),
- ForwardObject(obj));
+ if (obj != nullptr) {
+ ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
+ mirror::Reference::ReferentOffset(),
+ ForwardObject(obj));
+ }
}
void operator()(mirror::Object* obj) const
@@ -1377,7 +1339,7 @@
// Fixup objects may read fields in the boot image, use the mutator lock here for sanity.
// Though its probably not required.
- TimingLogger::ScopedTiming timing("Fixup cobjects", &logger);
+ TimingLogger::ScopedTiming timing("Fixup objects", &logger);
ScopedObjectAccess soa(Thread::Current());
// Need to update the image to be at the target base.
const ImageSection& objects_section = image_header.GetObjectsSection();
@@ -1648,46 +1610,14 @@
const uint32_t diff_;
};
- class PatchedObjectsMap {
- public:
- PatchedObjectsMap(uint8_t* image_space_begin, size_t size)
- : image_space_begin_(image_space_begin),
- data_(new uint8_t[BitsToBytesRoundUp(NumLocations(size))]),
- visited_objects_(data_.get(), /*bit_start=*/ 0u, NumLocations(size)) {
- DCHECK_ALIGNED(image_space_begin_, kObjectAlignment);
- std::memset(data_.get(), 0, BitsToBytesRoundUp(NumLocations(size)));
- }
-
- ALWAYS_INLINE bool IsVisited(mirror::Object* object) const {
- return visited_objects_.LoadBit(GetIndex(object));
- }
-
- ALWAYS_INLINE void MarkVisited(mirror::Object* object) {
- DCHECK(!IsVisited(object));
- visited_objects_.StoreBit(GetIndex(object), /*value=*/ true);
- }
-
- private:
- static size_t NumLocations(size_t size) {
- DCHECK_ALIGNED(size, kObjectAlignment);
- return size / kObjectAlignment;
- }
-
- size_t GetIndex(mirror::Object* object) const {
- DCHECK_ALIGNED(object, kObjectAlignment);
- return (reinterpret_cast<uint8_t*>(object) - image_space_begin_) / kObjectAlignment;
- }
-
- uint8_t* const image_space_begin_;
- const std::unique_ptr<uint8_t[]> data_;
- BitMemoryRegion visited_objects_;
- };
-
template <PointerSize kPointerSize>
static void DoRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
uint32_t diff) REQUIRES_SHARED(Locks::mutator_lock_) {
- PatchedObjectsMap patched_objects(spaces.front()->Begin(),
- spaces.back()->End() - spaces.front()->Begin());
+ std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> patched_objects(
+ gc::accounting::ContinuousSpaceBitmap::Create(
+ "Marked objects",
+ spaces.front()->Begin(),
+ spaces.back()->End() - spaces.front()->Begin()));
using PatchRelocateVisitor = PatchObjectVisitor<kPointerSize, RelocateVisitor>;
RelocateVisitor relocate_visitor(diff);
PatchRelocateVisitor patch_object_visitor(relocate_visitor);
@@ -1738,7 +1668,7 @@
slot.VisitRoot(class_table_visitor);
mirror::Class* klass = slot.Read<kWithoutReadBarrier>();
DCHECK(klass != nullptr);
- patched_objects.MarkVisited(klass);
+ patched_objects->Set(klass);
patch_object_visitor.VisitClass(klass);
if (kIsDebugBuild) {
mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
@@ -1750,8 +1680,7 @@
}
// Then patch the non-embedded vtable and iftable.
mirror::PointerArray* vtable = klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
- if (vtable != nullptr && !patched_objects.IsVisited(vtable)) {
- patched_objects.MarkVisited(vtable);
+ if (vtable != nullptr && !patched_objects->Set(vtable)) {
patch_object_visitor.VisitPointerArray(vtable);
}
auto* iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
@@ -1763,8 +1692,7 @@
if (unpatched_ifarray != nullptr) {
// The iftable has not been patched, so we need to explicitly adjust the pointer.
mirror::PointerArray* ifarray = relocate_visitor(unpatched_ifarray);
- if (!patched_objects.IsVisited(ifarray)) {
- patched_objects.MarkVisited(ifarray);
+ if (!patched_objects->Set(ifarray)) {
patch_object_visitor.VisitPointerArray(ifarray);
}
}
@@ -1783,13 +1711,13 @@
ObjPtr<mirror::ObjectArray<mirror::Object>> image_roots =
image_header.GetImageRoots<kWithoutReadBarrier>();
- patched_objects.MarkVisited(image_roots.Ptr());
+ patched_objects->Set(image_roots.Ptr());
patch_object_visitor.VisitObject(image_roots.Ptr());
ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
ObjPtr<mirror::ObjectArray<mirror::Class>>::DownCast(MakeObjPtr(
image_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kClassRoots)));
- patched_objects.MarkVisited(class_roots.Ptr());
+ patched_objects->Set(class_roots.Ptr());
patch_object_visitor.VisitObject(class_roots.Ptr());
method_class = GetClassRoot<mirror::Method, kWithoutReadBarrier>(class_roots);
@@ -1805,8 +1733,8 @@
DCHECK_ALIGNED(objects_end, kObjectAlignment);
for (uint32_t pos = sizeof(ImageHeader); pos != objects_end; ) {
mirror::Object* object = reinterpret_cast<mirror::Object*>(space->Begin() + pos);
- if (!patched_objects.IsVisited(object)) {
- // This is the last pass over objects, so we do not need to MarkVisited().
+ if (!patched_objects->Test(object)) {
+ // This is the last pass over objects, so we do not need to Set().
patch_object_visitor.VisitObject(object);
mirror::Class* klass = object->GetClass<kVerifyNone, kWithoutReadBarrier>();
if (klass->IsDexCacheClass<kVerifyNone>()) {
@@ -1915,7 +1843,6 @@
image_location.c_str(),
/*oat_file=*/ nullptr,
logger,
- /*thread_pool=*/ nullptr,
image_reservation,
error_msg);
}
diff --git a/runtime/hidden_api.cc b/runtime/hidden_api.cc
index e0939dd..c146daa 100644
--- a/runtime/hidden_api.cc
+++ b/runtime/hidden_api.cc
@@ -98,7 +98,7 @@
MemberSignature::MemberSignature(const ClassAccessor::Field& field) {
const DexFile& dex_file = field.GetDexFile();
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field.GetIndex());
+ const dex::FieldId& field_id = dex_file.GetFieldId(field.GetIndex());
class_name_ = dex_file.GetFieldDeclaringClassDescriptor(field_id);
member_name_ = dex_file.GetFieldName(field_id);
type_signature_ = dex_file.GetFieldTypeDescriptor(field_id);
@@ -107,7 +107,7 @@
MemberSignature::MemberSignature(const ClassAccessor::Method& method) {
const DexFile& dex_file = method.GetDexFile();
- const DexFile::MethodId& method_id = dex_file.GetMethodId(method.GetIndex());
+ const dex::MethodId& method_id = dex_file.GetMethodId(method.GetIndex());
class_name_ = dex_file.GetMethodDeclaringClassDescriptor(method_id);
member_name_ = dex_file.GetMethodName(method_id);
type_signature_ = dex_file.GetMethodSignature(method_id).ToString();
@@ -282,14 +282,14 @@
}
static void VisitMembers(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
+ const dex::ClassDef& class_def,
const std::function<void(const ClassAccessor::Field&)>& fn_visit) {
ClassAccessor accessor(dex_file, class_def, /* parse_hiddenapi_class_data= */ true);
accessor.VisitFields(fn_visit, fn_visit);
}
static void VisitMembers(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
+ const dex::ClassDef& class_def,
const std::function<void(const ClassAccessor::Method&)>& fn_visit) {
ClassAccessor accessor(dex_file, class_def, /* parse_hiddenapi_class_data= */ true);
accessor.VisitMethods(fn_visit, fn_visit);
@@ -317,7 +317,7 @@
if (LIKELY(original_dex == nullptr)) {
// Class is not redefined. Find the class def, iterate over its members and
// find the entry corresponding to this `member`.
- const DexFile::ClassDef* class_def = declaring_class->GetClassDef();
+ const dex::ClassDef* class_def = declaring_class->GetClassDef();
if (class_def == nullptr) {
flags = kNoDexFlags;
} else {
@@ -338,7 +338,7 @@
// to access a hidden member of a JVMTI-redefined class.
uint16_t class_def_idx = ext->GetPreRedefineClassDefIndex();
DCHECK_NE(class_def_idx, DexFile::kDexNoIndex16);
- const DexFile::ClassDef& original_class_def = original_dex->GetClassDef(class_def_idx);
+ const dex::ClassDef& original_class_def = original_dex->GetClassDef(class_def_idx);
MemberSignature member_signature(member);
auto fn_visit = [&](const AccessorType& dex_member) {
MemberSignature cur_signature(dex_member);
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index 13bead2..a0eeae2 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -236,6 +236,7 @@
case Intrinsics::kUnsafeFullFence:
case Intrinsics::kCRC32Update:
case Intrinsics::kCRC32UpdateBytes:
+ case Intrinsics::kCRC32UpdateByteBuffer:
case Intrinsics::kStringNewStringFromBytes:
case Intrinsics::kStringNewStringFromChars:
case Intrinsics::kStringNewStringFromString:
diff --git a/runtime/image.cc b/runtime/image.cc
index fe1d88f..b6bb0b1 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -29,7 +29,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '7', '3', '\0' }; // Image reservation.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '7', '4', '\0' }; // CRC32UpdateBB intrinsic
ImageHeader::ImageHeader(uint32_t image_reservation_size,
uint32_t component_count,
diff --git a/runtime/imtable-inl.h b/runtime/imtable-inl.h
index 93346f6..21e3eb1 100644
--- a/runtime/imtable-inl.h
+++ b/runtime/imtable-inl.h
@@ -46,7 +46,7 @@
}
const DexFile* dex_file = method->GetDexFile();
- const DexFile::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex());
+ const dex::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex());
// Class descriptor for the class component.
*class_hash = ComputeModifiedUtf8Hash(dex_file->GetMethodDeclaringClassDescriptor(method_id));
@@ -54,7 +54,7 @@
// Method name for the method component.
*name_hash = ComputeModifiedUtf8Hash(dex_file->GetMethodName(method_id));
- const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+ const dex::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
// Read the proto for the signature component.
uint32_t tmp = ComputeModifiedUtf8Hash(
@@ -63,10 +63,10 @@
// Mix in the argument types.
// Note: we could consider just using the shorty. This would be faster, at the price of
// potential collisions.
- const DexFile::TypeList* param_types = dex_file->GetProtoParameters(proto_id);
+ const dex::TypeList* param_types = dex_file->GetProtoParameters(proto_id);
if (param_types != nullptr) {
for (size_t i = 0; i != param_types->Size(); ++i) {
- const DexFile::TypeItem& type = param_types->GetTypeItem(i);
+ const dex::TypeItem& type = param_types->GetTypeItem(i);
tmp = 31 * tmp + ComputeModifiedUtf8Hash(
dex_file->GetTypeDescriptor(dex_file->GetTypeId(type.type_idx_)));
}
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index e52a1c9..7a40ab4 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -1168,7 +1168,7 @@
const DexFile* dex_file,
uint32_t call_site_idx)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile::CallSiteIdItem& csi = dex_file->GetCallSiteId(call_site_idx);
+ const dex::CallSiteIdItem& csi = dex_file->GetCallSiteId(call_site_idx);
CallSiteArrayValueIterator it(*dex_file, csi);
DCHECK_GE(it.Size(), 1u);
@@ -1223,7 +1223,7 @@
static constexpr size_t kMandatoryArgumentsCount = 3;
ArtMethod* referrer = shadow_frame.GetMethod();
const DexFile* dex_file = referrer->GetDexFile();
- const DexFile::CallSiteIdItem& csi = dex_file->GetCallSiteId(call_site_idx);
+ const dex::CallSiteIdItem& csi = dex_file->GetCallSiteId(call_site_idx);
CallSiteArrayValueIterator it(*dex_file, csi);
if (it.Size() < kMandatoryArgumentsCount) {
ThrowBootstrapMethodError("Truncated bootstrap arguments (%zu < %zu)",
@@ -1637,7 +1637,7 @@
// We need to do runtime check on reference assignment. We need to load the shorty
// to get the exact type of each reference argument.
- const DexFile::TypeList* params = method->GetParameterTypeList();
+ const dex::TypeList* params = method->GetParameterTypeList();
uint32_t shorty_len = 0;
const char* shorty = method->GetShorty(&shorty_len);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index a633a63..6366035 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -44,7 +44,7 @@
#include "handle_scope-inl.h"
#include "interpreter_mterp_impl.h"
#include "interpreter_switch_impl.h"
-#include "jit/jit.h"
+#include "jit/jit-inl.h"
#include "mirror/call_site.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
@@ -621,7 +621,7 @@
static inline bool IsStringInit(const DexFile* dex_file, uint32_t method_idx)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
+ const dex::MethodId& method_id = dex_file->GetMethodId(method_idx);
const char* class_name = dex_file->StringByTypeIdx(method_id.class_idx_);
const char* method_name = dex_file->GetMethodName(method_id);
// Instead of calling ResolveMethod() which has suspend point and can trigger
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 16e118c..2127f1d 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -560,6 +560,7 @@
UNIMPLEMENTED_CASE(ThreadInterrupted /* ()Z */)
UNIMPLEMENTED_CASE(CRC32Update /* (II)I */)
UNIMPLEMENTED_CASE(CRC32UpdateBytes /* (I[BII)I */)
+ UNIMPLEMENTED_CASE(CRC32UpdateByteBuffer /* (IJII)I */)
INTRINSIC_CASE(VarHandleFullFence)
INTRINSIC_CASE(VarHandleAcquireFence)
INTRINSIC_CASE(VarHandleReleaseFence)
diff --git a/runtime/interpreter/interpreter_switch_impl-inl.h b/runtime/interpreter/interpreter_switch_impl-inl.h
index 94cb3de..aec2aa2 100644
--- a/runtime/interpreter/interpreter_switch_impl-inl.h
+++ b/runtime/interpreter/interpreter_switch_impl-inl.h
@@ -26,7 +26,7 @@
#include "dex/dex_instruction_list.h"
#include "experimental_flags.h"
#include "interpreter_common.h"
-#include "jit/jit.h"
+#include "jit/jit-inl.h"
#include "jvalue-inl.h"
#include "mirror/string-alloc-inl.h"
#include "nth_caller_visitor.h"
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
index ca98999..3f6b729 100644
--- a/runtime/interpreter/shadow_frame.h
+++ b/runtime/interpreter/shadow_frame.h
@@ -23,7 +23,6 @@
#include "base/locks.h"
#include "base/macros.h"
-#include "dex/dex_file.h"
#include "lock_count_data.h"
#include "read_barrier.h"
#include "stack_reference.h"
diff --git a/runtime/intrinsics_list.h b/runtime/intrinsics_list.h
index db43b24..57e81a7 100644
--- a/runtime/intrinsics_list.h
+++ b/runtime/intrinsics_list.h
@@ -221,6 +221,7 @@
V(ReachabilityFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/ref/Reference;", "reachabilityFence", "(Ljava/lang/Object;)V") \
V(CRC32Update, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/util/zip/CRC32;", "update", "(II)I") \
V(CRC32UpdateBytes, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/util/zip/CRC32;", "updateBytes", "(I[BII)I") \
+ V(CRC32UpdateByteBuffer, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/util/zip/CRC32;", "updateByteBuffer", "(IJII)I") \
SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(V)
#endif // ART_RUNTIME_INTRINSICS_LIST_H_
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 7aa6ddf..99f9387 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -274,7 +274,7 @@
}
// Mapping from handle to entry. Used to manage life-time of the entries.
-static std::map<const void*, JITCodeEntry*> g_jit_debug_entries GUARDED_BY(g_jit_debug_lock);
+static std::multimap<const void*, JITCodeEntry*> g_jit_debug_entries GUARDED_BY(g_jit_debug_lock);
// Number of entries added since last packing. Used to pack entries in bulk.
static size_t g_jit_num_unpacked_entries GUARDED_BY(g_jit_debug_lock) = 0;
@@ -383,8 +383,7 @@
// (this only happens when --generate-debug-info flag is enabled for the purpose
// of being debugged with gdb; it does not happen for debuggable apps by default).
if (code_ptr != nullptr) {
- bool ok = g_jit_debug_entries.emplace(code_ptr, entry).second;
- DCHECK(ok) << "Native debug entry already exists for " << std::hex << code_ptr;
+ g_jit_debug_entries.emplace(code_ptr, entry);
// Count how many entries we have added since the last mini-debug-info packing.
// We avoid g_jit_debug_entries.size() here because it can shrink during packing.
g_jit_num_unpacked_entries++;
diff --git a/runtime/jit/jit-inl.h b/runtime/jit/jit-inl.h
new file mode 100644
index 0000000..80324ad
--- /dev/null
+++ b/runtime/jit/jit-inl.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_JIT_INL_H_
+#define ART_RUNTIME_JIT_JIT_INL_H_
+
+#include "jit/jit.h"
+
+#include "art_method.h"
+#include "base/bit_utils.h"
+#include "thread.h"
+#include "runtime-inl.h"
+
+namespace art {
+namespace jit {
+
+inline bool Jit::ShouldUsePriorityThreadWeight(Thread* self) {
+ return self->IsJitSensitiveThread() && Runtime::Current()->InJankPerceptibleProcessState();
+}
+
+inline void Jit::AddSamples(Thread* self,
+ ArtMethod* method,
+ uint16_t samples,
+ bool with_backedges) {
+ if (Jit::ShouldUsePriorityThreadWeight(self)) {
+ samples *= PriorityThreadWeight();
+ }
+ uint32_t old_count = method->GetCounter();
+ uint32_t new_count = old_count + samples;
+
+ // The full check is fairly expensive so we just add to hotness most of the time,
+ // and we do the full check only when some of the higher bits of the count change.
+ // NB: The method needs to see the transitions of the counter past the thresholds.
+ uint32_t old_batch = RoundDown(old_count, kJitSamplesBatchSize); // Clear lower bits.
+ uint32_t new_batch = RoundDown(new_count, kJitSamplesBatchSize); // Clear lower bits.
+ if (UNLIKELY(old_batch == 0)) {
+ // For low sample counts, we check every time (which is important for tests).
+ if (!MaybeCompileMethod(self, method, old_count, new_count, with_backedges)) {
+ // Tests may check that the counter is 0 for methods that we never compile.
+ return; // Ignore the samples for now and retry later.
+ }
+ } else if (UNLIKELY(old_batch != new_batch)) {
+ // For high sample counts, we check only when we move past the batch boundary.
+ if (!MaybeCompileMethod(self, method, old_batch, new_batch, with_backedges)) {
+ // OSR compilation will ignore the samples if they don't have backedges.
+ return; // Ignore the samples for now and retry later.
+ }
+ }
+
+ method->SetCounter(new_count);
+}
+
+} // namespace jit
+} // namespace art
+
+#endif // ART_RUNTIME_JIT_JIT_INL_H_
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 03c97f4..d44bd59 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -28,6 +28,7 @@
#include "debugger.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "interpreter/interpreter.h"
+#include "jit-inl.h"
#include "jit_code_cache.h"
#include "jni/java_vm_ext.h"
#include "mirror/method_handle_impl.h"
@@ -68,6 +69,14 @@
};
DEFINE_RUNTIME_DEBUG_FLAG(StressModeHelper, kSlowMode);
+uint32_t JitOptions::RoundUpThreshold(uint32_t threshold) {
+ if (threshold > kJitSamplesBatchSize) {
+ threshold = RoundUp(threshold, kJitSamplesBatchSize);
+ }
+ CHECK_LE(threshold, std::numeric_limits<uint16_t>::max());
+ return threshold;
+}
+
JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
auto* jit_options = new JitOptions;
jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);
@@ -93,30 +102,25 @@
: kJitStressDefaultCompileThreshold)
: kJitDefaultCompileThreshold;
}
- if (jit_options->compile_threshold_ > std::numeric_limits<uint16_t>::max()) {
- LOG(FATAL) << "Method compilation threshold is above its internal limit.";
- }
+ jit_options->compile_threshold_ = RoundUpThreshold(jit_options->compile_threshold_);
if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) {
jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold);
- if (jit_options->warmup_threshold_ > std::numeric_limits<uint16_t>::max()) {
- LOG(FATAL) << "Method warmup threshold is above its internal limit.";
- }
} else {
jit_options->warmup_threshold_ = jit_options->compile_threshold_ / 2;
}
+ jit_options->warmup_threshold_ = RoundUpThreshold(jit_options->warmup_threshold_);
if (options.Exists(RuntimeArgumentMap::JITOsrThreshold)) {
jit_options->osr_threshold_ = *options.Get(RuntimeArgumentMap::JITOsrThreshold);
- if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
- LOG(FATAL) << "Method on stack replacement threshold is above its internal limit.";
- }
} else {
jit_options->osr_threshold_ = jit_options->compile_threshold_ * 2;
if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
- jit_options->osr_threshold_ = std::numeric_limits<uint16_t>::max();
+ jit_options->osr_threshold_ =
+ RoundDown(std::numeric_limits<uint16_t>::max(), kJitSamplesBatchSize);
}
}
+ jit_options->osr_threshold_ = RoundUpThreshold(jit_options->osr_threshold_);
if (options.Exists(RuntimeArgumentMap::JITPriorityThreadWeight)) {
jit_options->priority_thread_weight_ =
@@ -149,10 +153,6 @@
return jit_options;
}
-bool Jit::ShouldUsePriorityThreadWeight(Thread* self) {
- return self->IsJitSensitiveThread() && Runtime::Current()->InJankPerceptibleProcessState();
-}
-
void Jit::DumpInfo(std::ostream& os) {
code_cache_->Dump(os);
cumulative_timings_.Dump(os);
@@ -639,20 +639,24 @@
return false;
}
-void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_backedges) {
+bool Jit::MaybeCompileMethod(Thread* self,
+ ArtMethod* method,
+ uint32_t old_count,
+ uint32_t new_count,
+ bool with_backedges) {
if (thread_pool_ == nullptr) {
// Should only see this when shutting down, starting up, or in safe mode.
DCHECK(Runtime::Current()->IsShuttingDown(self) ||
!Runtime::Current()->IsFinishedStarting() ||
Runtime::Current()->IsSafeMode());
- return;
+ return false;
}
if (IgnoreSamplesForMethod(method)) {
- return;
+ return false;
}
if (HotMethodThreshold() == 0) {
// Tests might request JIT on first use (compiled synchronously in the interpreter).
- return;
+ return false;
}
DCHECK(thread_pool_ != nullptr);
DCHECK_GT(WarmMethodThreshold(), 0);
@@ -661,15 +665,9 @@
DCHECK_GE(PriorityThreadWeight(), 1);
DCHECK_LE(PriorityThreadWeight(), HotMethodThreshold());
- uint16_t starting_count = method->GetCounter();
- if (Jit::ShouldUsePriorityThreadWeight(self)) {
- count *= PriorityThreadWeight();
- }
- uint32_t new_count = starting_count + count;
- // Note: Native method have no "warm" state or profiling info.
- if (LIKELY(!method->IsNative()) && starting_count < WarmMethodThreshold()) {
- if ((new_count >= WarmMethodThreshold()) &&
- (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
+ if (old_count < WarmMethodThreshold() && new_count >= WarmMethodThreshold()) {
+ // Note: Native methods have no "warm" state or profiling info.
+ if (!method->IsNative() && method->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
if (success) {
VLOG(jit) << "Start profiling " << method->PrettyMethod();
@@ -679,7 +677,7 @@
// Calling ProfilingInfo::Create might put us in a suspended state, which could
// lead to the thread pool being deleted when we are shutting down.
DCHECK(Runtime::Current()->IsShuttingDown(self));
- return;
+ return false;
}
if (!success) {
@@ -689,32 +687,27 @@
self, new JitCompileTask(method, JitCompileTask::TaskKind::kAllocateProfile));
}
}
- // Avoid jumping more than one state at a time.
- new_count = std::min(new_count, static_cast<uint32_t>(HotMethodThreshold() - 1));
- } else if (UseJitCompilation()) {
- if (starting_count < HotMethodThreshold()) {
- if ((new_count >= HotMethodThreshold()) &&
- !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
+ }
+ if (UseJitCompilation()) {
+ if (old_count < HotMethodThreshold() && new_count >= HotMethodThreshold()) {
+ if (!code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
DCHECK(thread_pool_ != nullptr);
thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
}
- // Avoid jumping more than one state at a time.
- new_count = std::min(new_count, static_cast<uint32_t>(OSRMethodThreshold() - 1));
- } else if (starting_count < OSRMethodThreshold()) {
+ }
+ if (old_count < OSRMethodThreshold() && new_count >= OSRMethodThreshold()) {
if (!with_backedges) {
- // If the samples don't contain any back edge, we don't increment the hotness.
- return;
+ return false;
}
DCHECK(!method->IsNative()); // No back edges reported for native methods.
- if ((new_count >= OSRMethodThreshold()) && !code_cache_->IsOsrCompiled(method)) {
+ if (!code_cache_->IsOsrCompiled(method)) {
DCHECK(thread_pool_ != nullptr);
thread_pool_->AddTask(
self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileOsr));
}
}
}
- // Update hotness counter
- method->SetCounter(new_count);
+ return true;
}
class ScopedSetRuntimeThread {
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index e5c9766..714db3a 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -47,6 +47,7 @@
// At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
// See android/os/Process.java.
static constexpr int kJitPoolThreadPthreadDefaultPriority = 9;
+static constexpr uint32_t kJitSamplesBatchSize = 32; // Must be a power of 2.
class JitOptions {
public:
@@ -122,12 +123,16 @@
}
private:
+ // We add samples in batches of size kJitSamplesBatchSize.
+ // This method rounds the threshold so that it is a multiple of the batch size.
+ static uint32_t RoundUpThreshold(uint32_t threshold);
+
bool use_jit_compilation_;
size_t code_cache_initial_capacity_;
size_t code_cache_max_capacity_;
- uint16_t compile_threshold_;
- uint16_t warmup_threshold_;
- uint16_t osr_threshold_;
+ uint32_t compile_threshold_;
+ uint32_t warmup_threshold_;
+ uint32_t osr_threshold_;
uint16_t priority_thread_weight_;
uint16_t invoke_transition_weight_;
bool dump_info_on_shutdown_;
@@ -154,7 +159,7 @@
static constexpr size_t kDefaultPriorityThreadWeightRatio = 1000;
static constexpr size_t kDefaultInvokeTransitionWeightRatio = 500;
// How frequently should the interpreter check to see if OSR compilation is ready.
- static constexpr int16_t kJitRecheckOSRThreshold = 100;
+ static constexpr int16_t kJitRecheckOSRThreshold = 101; // Prime number to avoid patterns.
virtual ~Jit();
@@ -218,7 +223,10 @@
void MethodEntered(Thread* thread, ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
- void AddSamples(Thread* self, ArtMethod* method, uint16_t samples, bool with_backedges)
+ ALWAYS_INLINE void AddSamples(Thread* self,
+ ArtMethod* method,
+ uint16_t samples,
+ bool with_backedges)
REQUIRES_SHARED(Locks::mutator_lock_);
void InvokeVirtualOrInterface(ObjPtr<mirror::Object> this_object,
@@ -298,6 +306,15 @@
private:
Jit(JitCodeCache* code_cache, JitOptions* options);
+ // Compile the method if the number of samples passes a threshold.
+ // Returns false if we cannot compile now - don't increment the counter and retry later.
+ bool MaybeCompileMethod(Thread* self,
+ ArtMethod* method,
+ uint32_t old_count,
+ uint32_t new_count,
+ bool with_backedges)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
static bool BindCompilerMethods(std::string* error_msg);
// JIT compiler
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index c8d4728..841ace5 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -431,11 +431,16 @@
ProfileCompilationInfo* cached_info = info_it->second;
const std::set<std::string>& locations = it.second;
+ VLOG(profiler) << "Locations for " << it.first << " " << android::base::Join(locations, ':');
+
for (const auto& pair : hot_methods.GetMap()) {
const DexFile* const dex_file = pair.first;
const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
+ const MethodReferenceCollection::IndexVector& indices = pair.second;
+ VLOG(profiler) << "Location " << dex_file->GetLocation()
+ << " found=" << (locations.find(base_location) != locations.end())
+ << " indices size=" << indices.size();
if (locations.find(base_location) != locations.end()) {
- const MethodReferenceCollection::IndexVector& indices = pair.second;
uint8_t flags = Hotness::kFlagHot;
flags |= startup ? Hotness::kFlagStartup : Hotness::kFlagPostStartup;
cached_info->AddMethodsForDex(
@@ -448,8 +453,11 @@
for (const auto& pair : sampled_methods.GetMap()) {
const DexFile* const dex_file = pair.first;
const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
+ const MethodReferenceCollection::IndexVector& indices = pair.second;
+ VLOG(profiler) << "Location " << base_location
+ << " found=" << (locations.find(base_location) != locations.end())
+ << " indices size=" << indices.size();
if (locations.find(base_location) != locations.end()) {
- const MethodReferenceCollection::IndexVector& indices = pair.second;
cached_info->AddMethodsForDex(startup ? Hotness::kFlagStartup : Hotness::kFlagPostStartup,
dex_file,
indices.begin(),
@@ -466,8 +474,7 @@
<< " (" << dex_file->GetLocation() << ")";
cached_info->AddClassesForDex(dex_file, classes.begin(), classes.end());
} else {
- VLOG(profiler) << "Location not found " << base_location
- << " (" << dex_file->GetLocation() << ")";
+ VLOG(profiler) << "Location not found " << base_location;
}
}
total_number_of_profile_entries_cached += resolved_classes_for_location.size();
@@ -513,6 +520,9 @@
}
const std::string& filename = it.first;
const std::set<std::string>& locations = it.second;
+ VLOG(profiler) << "Tracked filename " << filename << " locations "
+ << android::base::Join(locations, ":");
+
std::vector<ProfileMethodInfo> profile_methods;
{
ScopedObjectAccess soa(Thread::Current());
@@ -527,6 +537,9 @@
}
uint64_t last_save_number_of_methods = info.GetNumberOfMethods();
uint64_t last_save_number_of_classes = info.GetNumberOfResolvedClasses();
+ VLOG(profiler) << "last_save_number_of_methods=" << last_save_number_of_methods
+ << " last_save_number_of_classes=" << last_save_number_of_classes
+ << " number of profiled methods=" << profile_methods.size();
// Try to add the method data. Note this may fail is the profile loaded from disk contains
// outdated data (e.g. the previous profiled dex files might have been updated).
@@ -546,6 +559,11 @@
info.ClearData();
force_save = true;
}
+ } else if (VLOG_IS_ON(profiler)) {
+ LOG(INFO) << "Failed to find cached profile for " << filename;
+ for (auto&& pair : profile_cache_) {
+ LOG(INFO) << "Cached profile " << pair.first;
+ }
}
int64_t delta_number_of_methods =
@@ -662,6 +680,7 @@
std::vector<std::string> code_paths_to_profile;
for (const std::string& location : code_paths) {
if (ShouldProfileLocation(location, options.GetProfileAOTCode())) {
+ VLOG(profiler) << "Code path to profile " << location;
code_paths_to_profile.push_back(location);
}
}
diff --git a/runtime/jni/java_vm_ext.cc b/runtime/jni/java_vm_ext.cc
index a61a48a..7a9d292 100644
--- a/runtime/jni/java_vm_ext.cc
+++ b/runtime/jni/java_vm_ext.cc
@@ -535,8 +535,6 @@
if (current_method != nullptr) {
os << "\n from " << current_method->PrettyMethod();
}
- os << "\n";
- self->Dump(os);
if (check_jni_abort_hook_ != nullptr) {
check_jni_abort_hook_(check_jni_abort_hook_data_, os.str());
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 679ca43..40c7d30 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -22,6 +22,7 @@
#include "art_field.h"
#include "art_method.h"
#include "base/array_slice.h"
+#include "base/iteration_range.h"
#include "base/length_prefixed_array.h"
#include "base/utils.h"
#include "class_linker.h"
@@ -31,6 +32,7 @@
#include "dex/invoke_type.h"
#include "dex_cache.h"
#include "iftable.h"
+#include "imtable.h"
#include "object-inl.h"
#include "object_array.h"
#include "read_barrier-inl.h"
@@ -825,7 +827,7 @@
return ProxyDescriptorEquals(match);
} else {
const DexFile& dex_file = GetDexFile();
- const DexFile::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_);
+ const dex::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_);
return strcmp(dex_file.GetTypeDescriptor(type_id), match) == 0;
}
}
@@ -899,7 +901,7 @@
ObjectArray<Class>* interfaces = GetProxyInterfaces();
return interfaces != nullptr ? interfaces->GetLength() : 0;
} else {
- const DexFile::TypeList* interfaces = GetInterfaceTypeList();
+ const dex::TypeList* interfaces = GetInterfaceTypeList();
if (interfaces == nullptr) {
return 0;
} else {
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index c5ed1bf..53c9cc72 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -30,6 +30,7 @@
#include "dex/descriptors_names.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_annotations.h"
+#include "dex/signature-inl.h"
#include "dex_cache.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap-inl.h"
@@ -493,7 +494,7 @@
PointerSize pointer_size) {
// We always search by name and signature, ignoring the type index in the MethodId.
const DexFile& dex_file = *dex_cache->GetDexFile();
- const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
+ const dex::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
StringPiece name = dex_file.StringDataByIdx(method_id.name_idx_);
const Signature signature = dex_file.GetMethodSignature(method_id);
return FindInterfaceMethod(name, signature, pointer_size);
@@ -620,7 +621,7 @@
}
// If not found, we need to search by name and signature.
const DexFile& dex_file = *dex_cache->GetDexFile();
- const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
+ const dex::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
const Signature signature = dex_file.GetMethodSignature(method_id);
StringPiece name; // Delay strlen() until actually needed.
// If we do not have a dex_cache match, try to find the declared method in this class now.
@@ -651,7 +652,7 @@
// Matching dex_cache. We cannot compare the `dex_method_idx` anymore because
// the type index differs, so compare the name index and proto index.
for (ArtMethod& method : declared_methods) {
- const DexFile::MethodId& cmp_method_id = dex_file.GetMethodId(method.GetDexMethodIndex());
+ const dex::MethodId& cmp_method_id = dex_file.GetMethodId(method.GetDexMethodIndex());
if (cmp_method_id.name_idx_ == method_id.name_idx_ &&
cmp_method_id.proto_idx_ == method_id.proto_idx_) {
candidate_method = &method;
@@ -1005,7 +1006,7 @@
return storage->c_str();
} else {
const DexFile& dex_file = GetDexFile();
- const DexFile::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_);
+ const dex::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_);
return dex_file.GetTypeDescriptor(type_id);
}
}
@@ -1018,7 +1019,7 @@
return storage->c_str();
}
-const DexFile::ClassDef* Class::GetClassDef() {
+const dex::ClassDef* Class::GetClassDef() {
uint16_t class_def_idx = GetDexClassDefIndex();
if (class_def_idx == DexFile::kDexNoIndex16) {
return nullptr;
@@ -1086,7 +1087,7 @@
const char* Class::GetSourceFile() {
const DexFile& dex_file = GetDexFile();
- const DexFile::ClassDef* dex_class_def = GetClassDef();
+ const dex::ClassDef* dex_class_def = GetClassDef();
if (dex_class_def == nullptr) {
// Generated classes have no class def.
return nullptr;
@@ -1103,8 +1104,8 @@
return "generated class";
}
-const DexFile::TypeList* Class::GetInterfaceTypeList() {
- const DexFile::ClassDef* class_def = GetClassDef();
+const dex::TypeList* Class::GetInterfaceTypeList() {
+ const dex::ClassDef* class_def = GetClassDef();
if (class_def == nullptr) {
return nullptr;
}
@@ -1247,7 +1248,7 @@
dex::TypeIndex Class::FindTypeIndexInOtherDexFile(const DexFile& dex_file) {
std::string temp;
- const DexFile::TypeId* type_id = dex_file.FindTypeId(GetDescriptor(&temp));
+ const dex::TypeId* type_id = dex_file.FindTypeId(GetDescriptor(&temp));
return (type_id == nullptr) ? dex::TypeIndex() : dex_file.GetIndexForTypeId(*type_id);
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index d5aa514..f7a41f7 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -20,16 +20,13 @@
#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/enums.h"
-#include "base/iteration_range.h"
#include "base/stride_iterator.h"
#include "class_flags.h"
#include "class_status.h"
-#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
#include "dex/modifiers.h"
#include "dex/primitive.h"
#include "gc/allocator_type.h"
-#include "imtable.h"
#include "object.h"
#include "object_array.h"
#include "read_barrier_option.h"
@@ -37,11 +34,19 @@
namespace art {
+namespace dex {
+struct ClassDef;
+class TypeList;
+} // namespace dex
+
class ArtField;
class ArtMethod;
struct ClassOffsets;
+class DexFile;
template<class T> class Handle;
+class ImTable;
enum InvokeType : uint32_t;
+template <typename Iter> class IterationRange;
template<typename T> class LengthPrefixedArray;
template<typename T> class ArraySlice;
class Signature;
@@ -1133,7 +1138,7 @@
bool DescriptorEquals(const char* match) REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::ClassDef* GetClassDef() REQUIRES_SHARED(Locks::mutator_lock_);
+ const dex::ClassDef* GetClassDef() REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE uint32_t NumDirectInterfaces() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1156,7 +1161,7 @@
const DexFile& GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::TypeList* GetInterfaceTypeList() REQUIRES_SHARED(Locks::mutator_lock_);
+ const dex::TypeList* GetInterfaceTypeList() REQUIRES_SHARED(Locks::mutator_lock_);
// Asserts we are initialized or initializing in the given thread.
void AssertInitializedOrInitializingInThread(Thread* self)
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 36c5ae2..f7c1c02 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -146,8 +146,8 @@
Handle<mirror::DexCache> dex_cache = hs.NewHandle(
class_linker_->FindDexCache(Thread::Current(), dex_file));
- const DexFile::MethodId& method1_id = dex_file.GetMethodId(method1->GetDexMethodIndex());
- const DexFile::MethodId& method2_id = dex_file.GetMethodId(method2->GetDexMethodIndex());
+ const dex::MethodId& method1_id = dex_file.GetMethodId(method1->GetDexMethodIndex());
+ const dex::MethodId& method2_id = dex_file.GetMethodId(method2->GetDexMethodIndex());
Handle<mirror::MethodType> method1_type = hs.NewHandle(
class_linker_->ResolveMethodType(soa.Self(),
method1_id.proto_idx_,
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index d8c7b1d..f4b8ba5 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -364,16 +364,16 @@
Handle<Class> klass =
hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", loader));
ArtMethod* clinit = klass->FindClassInitializer(kRuntimePointerSize);
- const DexFile::TypeId* klass_type_id = dex_file->FindTypeId("LStaticsFromCode;");
+ const dex::TypeId* klass_type_id = dex_file->FindTypeId("LStaticsFromCode;");
ASSERT_TRUE(klass_type_id != nullptr);
- const DexFile::TypeId* type_type_id = dex_file->FindTypeId("Ljava/lang/Object;");
+ const dex::TypeId* type_type_id = dex_file->FindTypeId("Ljava/lang/Object;");
ASSERT_TRUE(type_type_id != nullptr);
- const DexFile::StringId* name_str_id = dex_file->FindStringId("s0");
+ const dex::StringId* name_str_id = dex_file->FindStringId("s0");
ASSERT_TRUE(name_str_id != nullptr);
- const DexFile::FieldId* field_id = dex_file->FindFieldId(
+ const dex::FieldId* field_id = dex_file->FindFieldId(
*klass_type_id, *name_str_id, *type_type_id);
ASSERT_TRUE(field_id != nullptr);
uint32_t field_idx = dex_file->GetIndexForFieldId(*field_id);
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 1da91b0..52482b7 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -383,7 +383,7 @@
const std::string descriptor(DotToDescriptor(class_name.c_str()));
const size_t hash(ComputeModifiedUtf8Hash(descriptor.c_str()));
for (auto& dex_file : dex_files) {
- const DexFile::ClassDef* dex_class_def =
+ const dex::ClassDef* dex_class_def =
OatDexFile::FindClassDef(*dex_file, descriptor.c_str(), hash);
if (dex_class_def != nullptr) {
ScopedObjectAccess soa(env);
@@ -440,7 +440,7 @@
std::set<const char*, CharPointerComparator> descriptors;
for (auto& dex_file : dex_files) {
for (size_t i = 0; i < dex_file->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ const dex::ClassDef& class_def = dex_file->GetClassDef(i);
const char* descriptor = dex_file->GetClassDescriptor(class_def);
descriptors.insert(descriptor);
}
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 892d4cc..d705d5f 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -413,7 +413,7 @@
return; // The entry already contains some ArtField.
}
const DexFile* dex_file = dex_cache->GetDexFile();
- const DexFile::FieldId& field_id = dex_file->GetFieldId(field_idx);
+ const dex::FieldId& field_id = dex_file->GetFieldId(field_idx);
ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupResolvedType(
field_id.class_idx_, dex_cache, /* class_loader= */ nullptr);
if (klass == nullptr) {
@@ -439,7 +439,7 @@
return; // The entry already contains some ArtMethod.
}
const DexFile* dex_file = dex_cache->GetDexFile();
- const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
+ const dex::MethodId& method_id = dex_file->GetMethodId(method_idx);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ObjPtr<mirror::Class> klass = class_linker->LookupResolvedType(
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 612a4b3..d022c3b 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -220,7 +220,7 @@
return soa.AddLocalReference<jobjectArray>(klass->GetProxyInterfaces()->Clone(soa.Self()));
}
- const DexFile::TypeList* iface_list = klass->GetInterfaceTypeList();
+ const dex::TypeList* iface_list = klass->GetInterfaceTypeList();
if (iface_list == nullptr) {
return nullptr;
}
diff --git a/runtime/native/java_lang_reflect_Executable.cc b/runtime/native/java_lang_reflect_Executable.cc
index ada0a64..2ce56b5 100644
--- a/runtime/native/java_lang_reflect_Executable.cc
+++ b/runtime/native/java_lang_reflect_Executable.cc
@@ -275,8 +275,8 @@
this_method = this_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
other_method = other_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
- const DexFile::TypeList* this_list = this_method->GetParameterTypeList();
- const DexFile::TypeList* other_list = other_method->GetParameterTypeList();
+ const dex::TypeList* this_list = this_method->GetParameterTypeList();
+ const dex::TypeList* other_list = other_method->GetParameterTypeList();
if (this_list == other_list) {
return 0;
@@ -298,9 +298,9 @@
}
for (int32_t i = 0; i < this_size; ++i) {
- const DexFile::TypeId& lhs = this_method->GetDexFile()->GetTypeId(
+ const dex::TypeId& lhs = this_method->GetDexFile()->GetTypeId(
this_list->GetTypeItem(i).type_idx_);
- const DexFile::TypeId& rhs = other_method->GetDexFile()->GetTypeId(
+ const dex::TypeId& rhs = other_method->GetDexFile()->GetTypeId(
other_list->GetTypeItem(i).type_idx_);
uint32_t lhs_len, rhs_len;
@@ -343,7 +343,7 @@
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
- const DexFile::TypeList* params = method->GetParameterTypeList();
+ const dex::TypeList* params = method->GetParameterTypeList();
if (params == nullptr) {
return nullptr;
}
@@ -378,7 +378,7 @@
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
- const DexFile::TypeList* params = method->GetParameterTypeList();
+ const dex::TypeList* params = method->GetParameterTypeList();
return (params == nullptr) ? 0 : params->Size();
}
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 5c5523d..f4a8c50 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -48,7 +48,9 @@
#include "base/unix_file/fd_file.h"
#include "base/utils.h"
#include "dex/art_dex_file_loader.h"
+#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
+#include "dex/dex_file_structs.h"
#include "dex/dex_file_types.h"
#include "dex/standard_dex_file.h"
#include "dex/type_lookup_table.h"
@@ -581,9 +583,9 @@
const char* dex_file_location_data = reinterpret_cast<const char*>(oat);
oat += dex_file_location_size;
- std::string dex_file_location = ResolveRelativeEncodedDexLocation(
- abs_dex_location,
- std::string(dex_file_location_data, dex_file_location_size));
+ std::string dex_file_location(dex_file_location_data, dex_file_location_size);
+ std::string dex_file_name =
+ ResolveRelativeEncodedDexLocation(abs_dex_location, dex_file_location);
uint32_t dex_file_checksum;
if (UNLIKELY(!ReadOatDexFileData(*this, &oat, &dex_file_checksum))) {
@@ -638,7 +640,7 @@
error_msg,
uncompressed_dex_files_.get());
} else {
- loaded = dex_file_loader.Open(dex_file_location.c_str(),
+ loaded = dex_file_loader.Open(dex_file_name.c_str(),
dex_file_location,
/*verify=*/ false,
/*verify_checksum=*/ false,
@@ -819,7 +821,7 @@
this, header->string_ids_size_, sizeof(GcRoot<mirror::String>), string_bss_mapping);
std::string canonical_location =
- DexFileLoader::GetDexCanonicalLocation(dex_file_location.c_str());
+ DexFileLoader::GetDexCanonicalLocation(dex_file_name.c_str());
// Create the OatDexFile and add it to the owning container.
OatDexFile* oat_dex_file = new OatDexFile(this,
@@ -1831,13 +1833,13 @@
reinterpret_cast<const OatMethodOffsets*>(methods_pointer));
}
-const DexFile::ClassDef* OatDexFile::FindClassDef(const DexFile& dex_file,
- const char* descriptor,
- size_t hash) {
+const dex::ClassDef* OatDexFile::FindClassDef(const DexFile& dex_file,
+ const char* descriptor,
+ size_t hash) {
const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
DCHECK_EQ(ComputeModifiedUtf8Hash(descriptor), hash);
bool used_lookup_table = false;
- const DexFile::ClassDef* lookup_table_classdef = nullptr;
+ const dex::ClassDef* lookup_table_classdef = nullptr;
if (LIKELY((oat_dex_file != nullptr) && oat_dex_file->GetTypeLookupTable().Valid())) {
used_lookup_table = true;
const uint32_t class_def_idx = oat_dex_file->GetTypeLookupTable().Lookup(descriptor, hash);
@@ -1854,10 +1856,10 @@
DCHECK(!used_lookup_table);
return nullptr;
}
- const DexFile::TypeId* type_id = dex_file.FindTypeId(descriptor);
+ const dex::TypeId* type_id = dex_file.FindTypeId(descriptor);
if (type_id != nullptr) {
dex::TypeIndex type_idx = dex_file.GetIndexForTypeId(*type_id);
- const DexFile::ClassDef* found_class_def = dex_file.FindClassDef(type_idx);
+ const dex::ClassDef* found_class_def = dex_file.FindClassDef(type_idx);
if (kIsDebugBuild && used_lookup_table) {
DCHECK_EQ(found_class_def, lookup_table_classdef);
}
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index ab6e62d..3e9c01f 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -29,7 +29,6 @@
#include "base/tracking_safe_map.h"
#include "class_status.h"
#include "compiler_filter.h"
-#include "dex/dex_file.h"
#include "dex/dex_file_layout.h"
#include "dex/type_lookup_table.h"
#include "dex/utf.h"
@@ -40,6 +39,7 @@
namespace art {
class BitVector;
+class DexFile;
class ElfFile;
class DexLayoutSections;
template <class MirrorType> class GcRoot;
@@ -50,6 +50,10 @@
class OatQuickMethodHeader;
class VdexFile;
+namespace dex {
+struct ClassDef;
+} // namespace dex
+
namespace gc {
namespace collector {
class DummyOatFile;
@@ -500,9 +504,9 @@
// Looks up a class definition by its class descriptor. Hash must be
// ComputeModifiedUtf8Hash(descriptor).
- static const DexFile::ClassDef* FindClassDef(const DexFile& dex_file,
- const char* descriptor,
- size_t hash);
+ static const dex::ClassDef* FindClassDef(const DexFile& dex_file,
+ const char* descriptor,
+ size_t hash);
// Madvise the dex file based on the state we are moving to.
static void MadviseDexFile(const DexFile& dex_file, MadviseState state);
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 9552ca3..5aa1ea2 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -186,7 +186,7 @@
static BitVector GenerateTypeIndexes(const DexFile* dex_file) {
BitVector type_indexes(/*start_bits=*/0, /*expandable=*/true, Allocator::GetMallocAllocator());
for (uint16_t i = 0; i < dex_file->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ const dex::ClassDef& class_def = dex_file->GetClassDef(i);
uint16_t type_idx = class_def.class_idx_.index_;
type_indexes.SetBit(type_idx);
}
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 8011836..dfd7e64 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -226,7 +226,7 @@
ArtMethod* m,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile::TypeList* classes = m->GetParameterTypeList();
+ const dex::TypeList* classes = m->GetParameterTypeList();
// Set receiver if non-null (method is not static)
if (receiver != nullptr) {
Append(receiver);
@@ -367,7 +367,7 @@
void CheckMethodArguments(JavaVMExt* vm, ArtMethod* m, uint32_t* args)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile::TypeList* params = m->GetParameterTypeList();
+ const dex::TypeList* params = m->GetParameterTypeList();
if (params == nullptr) {
return; // No arguments so nothing to check.
}
@@ -461,7 +461,7 @@
bool CheckArgsForInvokeMethod(ArtMethod* np_method,
ObjPtr<mirror::ObjectArray<mirror::Object>> objects)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile::TypeList* classes = np_method->GetParameterTypeList();
+ const dex::TypeList* classes = np_method->GetParameterTypeList();
uint32_t classes_size = (classes == nullptr) ? 0 : classes->Size();
uint32_t arg_count = (objects == nullptr) ? 0 : objects->GetLength();
if (UNLIKELY(arg_count != classes_size)) {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 7eac3d9..f30ba0c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -34,6 +34,7 @@
#include <cstdio>
#include <cstdlib>
#include <limits>
+#include <thread>
#include <vector>
#include "android-base/strings.h"
@@ -233,6 +234,7 @@
class_linker_(nullptr),
signal_catcher_(nullptr),
java_vm_(nullptr),
+ thread_pool_ref_count_(0u),
fault_message_(nullptr),
threads_being_born_(0),
shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)),
@@ -341,13 +343,17 @@
double post_gc_weighted_allocated_bytes =
heap_->GetPostGcWeightedAllocatedBytes() / process_cpu_time;
- LOG_STREAM(INFO) << "Pre GC weighted bytes allocated over CPU time: "
+ LOG_STREAM(INFO) << "Average bytes allocated at GC start, weighted by CPU time between GCs: "
+ << static_cast<uint64_t>(pre_gc_weighted_allocated_bytes)
<< " (" << PrettySize(pre_gc_weighted_allocated_bytes) << ")";
- LOG_STREAM(INFO) << "Post GC weighted bytes allocated over CPU time: "
+ LOG_STREAM(INFO) << "Average bytes allocated at GC end, weighted by CPU time between GCs: "
+ << static_cast<uint64_t>(post_gc_weighted_allocated_bytes)
<< " (" << PrettySize(post_gc_weighted_allocated_bytes) << ")"
<< "\n";
}
+ WaitForThreadPoolWorkersToStart();
+
if (jit_ != nullptr) {
// Wait for the workers to be created since there can't be any threads attaching during
// shutdown.
@@ -400,6 +406,8 @@
// JIT compiler threads.
jit_->DeleteThreadPool();
}
+ DeleteThreadPool();
+ CHECK(thread_pool_ == nullptr);
// Make sure our internal threads are dead before we start tearing down things they're using.
GetRuntimeCallbacks()->StopDebugger();
@@ -930,6 +938,18 @@
// Create the thread pools.
heap_->CreateThreadPool();
+ {
+ ScopedTrace timing("CreateThreadPool");
+ constexpr size_t kStackSize = 64 * KB;
+ constexpr size_t kMaxRuntimeWorkers = 4u;
+ const size_t num_workers =
+ std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers);
+ MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
+ CHECK(thread_pool_ == nullptr);
+ thread_pool_.reset(new ThreadPool("Runtime", num_workers, /*create_peers=*/false, kStackSize));
+ thread_pool_->StartWorkers(Thread::Current());
+ }
+
// Reset the gc performance data at zygote fork so that the GCs
// before fork aren't attributed to an app.
heap_->ResetGcPerformanceInfo();
@@ -2658,4 +2678,45 @@
GetClassLinker()->VisitClasses(&visitor);
}
}
+
+Runtime::ScopedThreadPoolUsage::ScopedThreadPoolUsage()
+ : thread_pool_(Runtime::Current()->AcquireThreadPool()) {}
+
+Runtime::ScopedThreadPoolUsage::~ScopedThreadPoolUsage() {
+ Runtime::Current()->ReleaseThreadPool();
+}
+
+bool Runtime::DeleteThreadPool() {
+ // Make sure workers are started to prevent thread shutdown errors.
+ WaitForThreadPoolWorkersToStart();
+ std::unique_ptr<ThreadPool> thread_pool;
+ {
+ MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
+ if (thread_pool_ref_count_ == 0) {
+ thread_pool = std::move(thread_pool_);
+ }
+ }
+ return thread_pool != nullptr;
+}
+
+ThreadPool* Runtime::AcquireThreadPool() {
+ MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
+ ++thread_pool_ref_count_;
+ return thread_pool_.get();
+}
+
+void Runtime::ReleaseThreadPool() {
+ MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
+ CHECK_GT(thread_pool_ref_count_, 0u);
+ --thread_pool_ref_count_;
+}
+
+void Runtime::WaitForThreadPoolWorkersToStart() {
+ // Need to make sure workers are created before deleting the pool.
+ ScopedThreadPoolUsage stpu;
+ if (stpu.GetThreadPool() != nullptr) {
+ stpu.GetThreadPool()->WaitForWorkersToBeCreated();
+ }
+}
+
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 76cfcd1..a2d519d 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -792,6 +792,28 @@
return verifier_logging_threshold_ms_;
}
+ // Atomically delete the thread pool if the reference count is 0.
+ bool DeleteThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
+
+ // Wait for all the thread workers to be attached.
+ void WaitForThreadPoolWorkersToStart() REQUIRES(!Locks::runtime_thread_pool_lock_);
+
+ // Scoped usage of the runtime thread pool. Prevents the pool from being
+ // deleted. Note that the thread pool is only for startup and gets deleted after.
+ class ScopedThreadPoolUsage {
+ public:
+ ScopedThreadPoolUsage();
+ ~ScopedThreadPoolUsage();
+
+ // Return the thread pool.
+ ThreadPool* GetThreadPool() const {
+ return thread_pool_;
+ }
+
+ private:
+ ThreadPool* const thread_pool_;
+ };
+
private:
static void InitPlatformSignalHandlers();
@@ -828,6 +850,9 @@
// friend).
std::string GetFaultMessage();
+ ThreadPool* AcquireThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
+ void ReleaseThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
+
// A pointer to the active runtime or null.
static Runtime* instance_;
@@ -911,6 +936,10 @@
std::unique_ptr<jit::JitCodeCache> jit_code_cache_;
std::unique_ptr<jit::JitOptions> jit_options_;
+ // Runtime thread pool. The pool is only for startup and gets deleted after.
+ std::unique_ptr<ThreadPool> thread_pool_ GUARDED_BY(Locks::runtime_thread_pool_lock_);
+ size_t thread_pool_ref_count_ GUARDED_BY(Locks::runtime_thread_pool_lock_);
+
// Fault message, printed when we get a SIGSEGV. Stored as a native-heap object and accessed
// lock-free, so needs to be atomic.
std::atomic<std::string*> fault_message_;
@@ -1115,6 +1144,7 @@
// Note: See comments on GetFaultMessage.
friend std::string GetFaultMessageForAbortLogging();
+ friend class ScopedThreadPoolUsage;
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
diff --git a/runtime/runtime_callbacks.cc b/runtime/runtime_callbacks.cc
index bf74816..da13eb8 100644
--- a/runtime/runtime_callbacks.cc
+++ b/runtime/runtime_callbacks.cc
@@ -205,14 +205,14 @@
Handle<mirror::Class> temp_class,
Handle<mirror::ClassLoader> loader,
const DexFile& initial_dex_file,
- const DexFile::ClassDef& initial_class_def,
+ const dex::ClassDef& initial_class_def,
/*out*/DexFile const** final_dex_file,
- /*out*/DexFile::ClassDef const** final_class_def) {
+ /*out*/dex::ClassDef const** final_class_def) {
DexFile const* current_dex_file = &initial_dex_file;
- DexFile::ClassDef const* current_class_def = &initial_class_def;
+ dex::ClassDef const* current_class_def = &initial_class_def;
for (ClassLoadCallback* cb : class_callbacks_) {
DexFile const* new_dex_file = nullptr;
- DexFile::ClassDef const* new_class_def = nullptr;
+ dex::ClassDef const* new_class_def = nullptr;
cb->ClassPreDefine(descriptor,
temp_class,
loader,
diff --git a/runtime/runtime_callbacks.h b/runtime/runtime_callbacks.h
index 32ee3aa3..41d552a 100644
--- a/runtime/runtime_callbacks.h
+++ b/runtime/runtime_callbacks.h
@@ -22,11 +22,14 @@
#include "base/array_ref.h"
#include "base/locks.h"
#include "base/macros.h"
-#include "dex/dex_file.h"
#include "handle.h"
namespace art {
+namespace dex {
+struct ClassDef;
+} // namespace dex
+
namespace mirror {
class Class;
class ClassLoader;
@@ -35,6 +38,7 @@
class ArtMethod;
class ClassLoadCallback;
+class DexFile;
class Thread;
class MethodCallback;
class Monitor;
@@ -183,9 +187,9 @@
Handle<mirror::Class> temp_class,
Handle<mirror::ClassLoader> loader,
const DexFile& initial_dex_file,
- const DexFile::ClassDef& initial_class_def,
+ const dex::ClassDef& initial_class_def,
/*out*/DexFile const** final_dex_file,
- /*out*/DexFile::ClassDef const** final_class_def)
+ /*out*/dex::ClassDef const** final_class_def)
REQUIRES_SHARED(Locks::mutator_lock_);
void AddMethodCallback(MethodCallback* cb) REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index f2e5012..df06a9f 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -147,6 +147,8 @@
self->TransitionFromSuspendedToRunnable();
bool started = runtime_->Start();
ASSERT_TRUE(started);
+ // Make sure the workers are done starting so we don't get callbacks for them.
+ runtime_->WaitForThreadPoolWorkersToStart();
cb_.state = CallbackState::kBase; // Ignore main thread attach.
@@ -255,9 +257,9 @@
Handle<mirror::Class> klass ATTRIBUTE_UNUSED,
Handle<mirror::ClassLoader> class_loader ATTRIBUTE_UNUSED,
const DexFile& initial_dex_file,
- const DexFile::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
+ const dex::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
/*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
- /*out*/DexFile::ClassDef const** final_class_def ATTRIBUTE_UNUSED) override
+ /*out*/dex::ClassDef const** final_class_def ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) {
const std::string& location = initial_dex_file.GetLocation();
std::string event =
diff --git a/runtime/thread.cc b/runtime/thread.cc
index f459f9c..7c050a4 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -73,6 +73,7 @@
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "indirect_reference_table-inl.h"
+#include "instrumentation.h"
#include "interpreter/interpreter.h"
#include "interpreter/mterp/mterp.h"
#include "interpreter/shadow_frame-inl.h"
diff --git a/runtime/thread.h b/runtime/thread.h
index 6db1943..ad69ecf 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -36,7 +36,6 @@
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "handle_scope.h"
-#include "instrumentation.h"
#include "interpreter/interpreter_cache.h"
#include "jvalue.h"
#include "managed_stack.h"
@@ -59,6 +58,10 @@
} // namespace collector
} // namespace gc
+namespace instrumentation {
+struct InstrumentationStackFrame;
+} // namespace instrumentation
+
namespace mirror {
class Array;
class Class;
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 4bbd81a..75462ac 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -766,15 +766,29 @@
#if ART_USE_FUTEXES
if (futex(pending_threads.Address(), FUTEX_WAIT_PRIVATE, cur_val, &wait_timeout, nullptr, 0)
!= 0) {
- // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
- if ((errno != EAGAIN) && (errno != EINTR)) {
- if (errno == ETIMEDOUT) {
- LOG(kIsDebugBuild ? ::android::base::FATAL : ::android::base::ERROR)
- << "Timed out waiting for threads to suspend, waited for "
- << PrettyDuration(NanoTime() - start_time);
- } else {
- PLOG(FATAL) << "futex wait failed for SuspendAllInternal()";
+ if ((errno == EAGAIN) || (errno == EINTR)) {
+ // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
+ continue;
+ }
+ if (errno == ETIMEDOUT) {
+ const uint64_t wait_time = NanoTime() - start_time;
+ MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+ std::ostringstream oss;
+ for (const auto& thread : list_) {
+ if (thread == ignore1 || thread == ignore2) {
+ continue;
+ }
+ if (!thread->IsSuspended()) {
+ oss << std::endl << "Thread not suspended: " << *thread;
+ }
}
+ LOG(kIsDebugBuild ? ::android::base::FATAL : ::android::base::ERROR)
+ << "Timed out waiting for threads to suspend, waited for "
+ << PrettyDuration(wait_time)
+ << oss.str();
+ } else {
+ PLOG(FATAL) << "futex wait failed for SuspendAllInternal()";
}
} // else re-check pending_threads in the next iteration (this may be a spurious wake-up).
#else
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 3d9afa0..69ded3d 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -489,7 +489,7 @@
// Go search the dex file to find the string id of our string.
static const char* kResolvedString = "ResolvedString";
- const DexFile::StringId* string_id = dex_file->FindStringId(kResolvedString);
+ const dex::StringId* string_id = dex_file->FindStringId(kResolvedString);
ASSERT_TRUE(string_id != nullptr);
dex::StringIndex string_idx = dex_file->GetIndexForStringId(*string_id);
ASSERT_TRUE(string_idx.IsValid());
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index f24711a..72c42b9 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -281,12 +281,12 @@
return;
}
// Make sure to not unquicken the same code item multiple times.
- std::unordered_set<const DexFile::CodeItem*> unquickened_code_item;
+ std::unordered_set<const dex::CodeItem*> unquickened_code_item;
CompactOffsetTable::Accessor accessor(GetQuickenInfoOffsetTable(source_dex_begin,
quickening_info));
for (ClassAccessor class_accessor : target_dex_file.GetClasses()) {
for (const ClassAccessor::Method& method : class_accessor.GetMethods()) {
- const DexFile::CodeItem* code_item = method.GetCodeItem();
+ const dex::CodeItem* code_item = method.GetCodeItem();
if (code_item != nullptr && unquickened_code_item.emplace(code_item).second) {
const uint32_t offset = accessor.GetOffset(method.GetIndex());
// Offset being 0 means not quickened.
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 0b33a0b..1679821 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -158,7 +158,7 @@
bool early_failure = false;
std::string failure_message;
const DexFile& dex_file = klass->GetDexFile();
- const DexFile::ClassDef* class_def = klass->GetClassDef();
+ const dex::ClassDef* class_def = klass->GetClassDef();
ObjPtr<mirror::Class> super = klass->GetSuperClass();
std::string temp;
if (super == nullptr && strcmp("Ljava/lang/Object;", klass->GetDescriptor(&temp)) != 0) {
@@ -210,7 +210,7 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef& class_def,
+ const dex::ClassDef& class_def,
CompilerCallbacks* callbacks,
bool allow_soft_failures,
HardFailLogMode log_level,
@@ -319,8 +319,8 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef& class_def,
- const DexFile::CodeItem* code_item,
+ const dex::ClassDef& class_def,
+ const dex::CodeItem* code_item,
ArtMethod* method,
uint32_t method_access_flags,
CompilerCallbacks* callbacks,
@@ -462,8 +462,8 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef& class_def,
- const DexFile::CodeItem* code_item,
+ const dex::ClassDef& class_def,
+ const dex::CodeItem* code_item,
ArtMethod* method,
uint32_t method_access_flags,
uint32_t api_level) {
@@ -500,8 +500,8 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef& class_def,
- const DexFile::CodeItem* code_item,
+ const dex::ClassDef& class_def,
+ const dex::CodeItem* code_item,
uint32_t dex_method_idx,
ArtMethod* method,
uint32_t method_access_flags,
@@ -602,7 +602,7 @@
bool MethodVerifier::Verify() {
// Some older code doesn't correctly mark constructors as such. Test for this case by looking at
// the name.
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+ const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
const char* method_name = dex_file_->StringDataByIdx(method_id.name_idx_);
bool instance_constructor_by_name = strcmp("<init>", method_name) == 0;
bool static_constructor_by_name = strcmp("<clinit>", method_name) == 0;
@@ -917,7 +917,7 @@
return true;
}
const uint32_t insns_size = code_item_accessor_.InsnsSizeInCodeUnits();
- for (const DexFile::TryItem& try_item : code_item_accessor_.TryItems()) {
+ for (const dex::TryItem& try_item : code_item_accessor_.TryItems()) {
const uint32_t start = try_item.start_addr_;
const uint32_t end = start + try_item.insn_count_;
if ((start >= end) || (start >= insns_size) || (end > insns_size)) {
@@ -1637,7 +1637,7 @@
cur_arg++;
}
- const DexFile::ProtoId& proto_id =
+ const dex::ProtoId& proto_id =
dex_file_->GetMethodPrototype(dex_file_->GetMethodId(dex_method_idx_));
DexFileParameterIterator iterator(*dex_file_, proto_id);
@@ -1876,7 +1876,7 @@
// Returns the index of the first final instance field of the given class, or kDexNoIndex if there
// is no such field.
static uint32_t GetFirstFinalInstanceFieldIndex(const DexFile& dex_file, dex::TypeIndex type_idx) {
- const DexFile::ClassDef* class_def = dex_file.FindClassDef(type_idx);
+ const dex::ClassDef* class_def = dex_file.FindClassDef(type_idx);
DCHECK(class_def != nullptr);
ClassAccessor accessor(dex_file, *class_def);
for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
@@ -2885,7 +2885,7 @@
}
if (return_type == nullptr) {
uint32_t method_idx = GetMethodIdxOfInvoke(inst);
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+ const dex::MethodId& method_id = dex_file_->GetMethodId(method_idx);
dex::TypeIndex return_type_idx =
dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
const char* descriptor = dex_file_->StringByTypeIdx(return_type_idx);
@@ -2908,7 +2908,7 @@
const RegType* return_type = nullptr;
if (called_method == nullptr) {
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+ const dex::MethodId& method_id = dex_file_->GetMethodId(method_idx);
is_constructor = strcmp("<init>", dex_file_->StringDataByIdx(method_id.name_idx_)) == 0;
dex::TypeIndex return_type_idx =
dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
@@ -2986,7 +2986,7 @@
const char* descriptor;
if (called_method == nullptr) {
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+ const dex::MethodId& method_id = dex_file_->GetMethodId(method_idx);
dex::TypeIndex return_type_idx =
dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
descriptor = dex_file_->StringByTypeIdx(return_type_idx);
@@ -3041,7 +3041,7 @@
const char* descriptor;
if (abs_method == nullptr) {
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+ const dex::MethodId& method_id = dex_file_->GetMethodId(method_idx);
dex::TypeIndex return_type_idx =
dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
descriptor = dex_file_->StringByTypeIdx(return_type_idx);
@@ -3106,7 +3106,7 @@
// method handle produced by step 1. The dex file verifier has checked ranges for
// the first three arguments and CheckCallSite has checked the method handle type.
const dex::ProtoIndex proto_idx = dex_file_->GetProtoIndexForCallSite(call_site_idx);
- const DexFile::ProtoId& proto_id = dex_file_->GetProtoId(proto_idx);
+ const dex::ProtoId& proto_id = dex_file_->GetProtoId(proto_idx);
DexFileParameterIterator param_it(*dex_file_, proto_id);
// Treat method as static as it has yet to be determined.
VerifyInvocationArgsFromIterator(¶m_it, inst, METHOD_STATIC, is_range, nullptr);
@@ -3497,7 +3497,7 @@
*/
if ((opcode_flags & Instruction::kThrow) != 0 && GetInstructionFlags(work_insn_idx_).IsInTry()) {
bool has_catch_all_handler = false;
- const DexFile::TryItem* try_item = code_item_accessor_.FindTryItem(work_insn_idx_);
+ const dex::TryItem* try_item = code_item_accessor_.FindTryItem(work_insn_idx_);
CHECK(try_item != nullptr);
CatchHandlerIterator iterator(code_item_accessor_, *try_item);
@@ -3749,7 +3749,7 @@
ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(
uint32_t dex_method_idx, MethodType method_type) {
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
+ const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
const RegType& klass_type = ResolveClass<CheckAccess::kYes>(method_id.class_idx_);
if (klass_type.IsConflict()) {
std::string append(" in attempt to access method ");
@@ -4093,7 +4093,7 @@
}
// Check method handle kind is valid.
- const DexFile::MethodHandleItem& mh = dex_file_->GetMethodHandle(index[0]);
+ const dex::MethodHandleItem& mh = dex_file_->GetMethodHandle(index[0]);
if (mh.method_handle_type_ != static_cast<uint16_t>(DexFile::MethodHandleType::kInvokeStatic)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Call site #" << call_site_idx
<< " argument 0 method handle type is not InvokeStatic: "
@@ -4125,7 +4125,7 @@
private:
ArtMethod* res_method_;
size_t pos_;
- const DexFile::TypeList* params_;
+ const dex::TypeList* params_;
const size_t params_size_;
};
@@ -4231,7 +4231,7 @@
return false;
}
- const DexFile::TypeList* types = method->GetParameterTypeList();
+ const dex::TypeList* types = method->GetParameterTypeList();
if (types->Size() != 1) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "Signature polymorphic method has too many arguments " << types->Size() << " != 1";
@@ -4553,7 +4553,7 @@
}
ArtField* MethodVerifier::GetStaticField(int field_idx) {
- const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
+ const dex::FieldId& field_id = dex_file_->GetFieldId(field_idx);
// Check access to class
const RegType& klass_type = ResolveClass<CheckAccess::kYes>(field_id.class_idx_);
if (klass_type.IsConflict()) { // bad class
@@ -4596,7 +4596,7 @@
}
ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) {
- const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
+ const dex::FieldId& field_id = dex_file_->GetFieldId(field_idx);
// Check access to class.
const RegType& klass_type = ResolveClass<CheckAccess::kYes>(field_id.class_idx_);
if (klass_type.IsConflict()) {
@@ -4756,7 +4756,7 @@
//
// Note: see b/34966607. This and above may be changed in the future.
if (kAccType == FieldAccessType::kAccPut) {
- const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
+ const dex::FieldId& field_id = dex_file_->GetFieldId(field_idx);
const char* field_class_descriptor = dex_file_->GetFieldDeclaringClassDescriptor(field_id);
const RegType* field_class_type = ®_types_.FromDescriptor(GetClassLoader(),
field_class_descriptor,
@@ -4772,7 +4772,7 @@
}
}
if (field_type == nullptr) {
- const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
+ const dex::FieldId& field_id = dex_file_->GetFieldId(field_idx);
const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
field_type = ®_types_.FromDescriptor(GetClassLoader(), descriptor, false);
}
@@ -4935,8 +4935,8 @@
}
}
if (return_type_ == nullptr) {
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
- const DexFile::ProtoId& proto_id = dex_file_->GetMethodPrototype(method_id);
+ const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+ const dex::ProtoId& proto_id = dex_file_->GetMethodPrototype(method_id);
dex::TypeIndex return_type_idx = proto_id.return_type_idx_;
const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(return_type_idx));
return_type_ = ®_types_.FromDescriptor(GetClassLoader(), descriptor, false);
@@ -4947,7 +4947,7 @@
const RegType& MethodVerifier::GetDeclaringClass() {
if (declaring_class_ == nullptr) {
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+ const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
const char* descriptor
= dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
if (method_being_verified_ != nullptr) {
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index eef2280..c178df0 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -26,7 +26,6 @@
#include "base/scoped_arena_containers.h"
#include "base/value_object.h"
#include "dex/code_item_accessors.h"
-#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
#include "dex/method_reference.h"
#include "handle.h"
@@ -39,11 +38,17 @@
class ClassLinker;
class CompilerCallbacks;
+class DexFile;
class Instruction;
struct ReferenceMap2Visitor;
class Thread;
class VariableIndentationOutputStream;
+namespace dex {
+struct ClassDef;
+struct CodeItem;
+} // namespace dex
+
namespace mirror {
class DexCache;
} // namespace mirror
@@ -107,7 +112,7 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef& class_def,
+ const dex::ClassDef& class_def,
CompilerCallbacks* callbacks,
bool allow_soft_failures,
HardFailLogMode log_level,
@@ -121,8 +126,8 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef& class_def,
- const DexFile::CodeItem* code_item, ArtMethod* method,
+ const dex::ClassDef& class_def,
+ const dex::CodeItem* code_item, ArtMethod* method,
uint32_t method_access_flags,
uint32_t api_level)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -238,8 +243,8 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef& class_def,
- const DexFile::CodeItem* code_item,
+ const dex::ClassDef& class_def,
+ const dex::CodeItem* code_item,
uint32_t method_idx,
ArtMethod* method,
uint32_t access_flags,
@@ -297,8 +302,8 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef& class_def_idx,
- const DexFile::CodeItem* code_item,
+ const dex::ClassDef& class_def_idx,
+ const dex::CodeItem* code_item,
ArtMethod* method,
uint32_t method_access_flags,
CompilerCallbacks* callbacks,
@@ -716,7 +721,7 @@
Handle<mirror::DexCache> dex_cache_ GUARDED_BY(Locks::mutator_lock_);
// The class loader for the declaring class of the method.
Handle<mirror::ClassLoader> class_loader_ GUARDED_BY(Locks::mutator_lock_);
- const DexFile::ClassDef& class_def_; // The class def of the declaring class of the method.
+ const dex::ClassDef& class_def_; // The class def of the declaring class of the method.
const CodeItemDataAccessor code_item_accessor_;
const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
// Instruction widths and flags, one entry per code unit.
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index 7519257..36890a6 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -57,7 +57,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
// Verify all the classes defined in this file
for (size_t i = 0; i < dex.NumClassDefs(); i++) {
- const DexFile::ClassDef& class_def = dex.GetClassDef(i);
+ const dex::ClassDef& class_def = dex.GetClassDef(i);
const char* descriptor = dex.GetClassDescriptor(class_def);
VerifyClass(descriptor);
}
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index d346a95..bdcadd9 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -99,9 +99,9 @@
DCHECK(dex_cache != nullptr) << klass->PrettyClass();
if (dex_cache->GetDexFile() == &dex_file) {
// FindStringId is slow, try to go through the class def if we have one.
- const DexFile::ClassDef* class_def = klass->GetClassDef();
+ const dex::ClassDef* class_def = klass->GetClassDef();
DCHECK(class_def != nullptr) << klass->PrettyClass();
- const DexFile::TypeId& type_id = dex_file.GetTypeId(class_def->class_idx_);
+ const dex::TypeId& type_id = dex_file.GetTypeId(class_def->class_idx_);
if (kIsDebugBuild) {
std::string temp;
CHECK_EQ(GetIdFromString(dex_file, klass->GetDescriptor(&temp)), type_id.descriptor_idx_);
@@ -119,9 +119,9 @@
ObjPtr<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (!klass->IsArrayClass()) {
- const DexFile::TypeId& type_id = dex_file.GetTypeId(type_idx);
+ const dex::TypeId& type_id = dex_file.GetTypeId(type_idx);
const DexFile& klass_dex = klass->GetDexFile();
- const DexFile::TypeId& klass_type_id = klass_dex.GetTypeId(klass->GetClassDef()->class_idx_);
+ const dex::TypeId& klass_type_id = klass_dex.GetTypeId(klass->GetClassDef()->class_idx_);
if (strcmp(dex_file.GetTypeDescriptor(type_id),
klass_dex.GetTypeDescriptor(klass_type_id)) == 0) {
return type_id.descriptor_idx_;
@@ -201,7 +201,7 @@
}
dex::StringIndex VerifierDeps::GetIdFromString(const DexFile& dex_file, const std::string& str) {
- const DexFile::StringId* string_id = dex_file.FindStringId(str.c_str());
+ const dex::StringId* string_id = dex_file.FindStringId(str.c_str());
if (string_id != nullptr) {
// String is in the DEX file. Return its ID.
return dex_file.GetIndexForStringId(*string_id);
@@ -805,7 +805,7 @@
}
for (const FieldResolution& entry : dep.second->fields_) {
- const DexFile::FieldId& field_id = dex_file.GetFieldId(entry.GetDexFieldIndex());
+ const dex::FieldId& field_id = dex_file.GetFieldId(entry.GetDexFieldIndex());
vios->Stream()
<< dex_file.GetFieldDeclaringClassDescriptor(field_id) << "->"
<< dex_file.GetFieldName(field_id) << ":"
@@ -823,7 +823,7 @@
}
for (const MethodResolution& method : dep.second->methods_) {
- const DexFile::MethodId& method_id = dex_file.GetMethodId(method.GetDexMethodIndex());
+ const dex::MethodId& method_id = dex_file.GetMethodId(method.GetDexMethodIndex());
vios->Stream()
<< dex_file.GetMethodDeclaringClassDescriptor(method_id) << "->"
<< dex_file.GetMethodName(method_id)
@@ -949,7 +949,7 @@
}
static std::string GetFieldDescription(const DexFile& dex_file, uint32_t index) {
- const DexFile::FieldId& field_id = dex_file.GetFieldId(index);
+ const dex::FieldId& field_id = dex_file.GetFieldId(index);
return std::string(dex_file.GetFieldDeclaringClassDescriptor(field_id))
+ "->"
+ dex_file.GetFieldName(field_id)
@@ -965,7 +965,7 @@
// and have the same recorded flags.
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
for (const auto& entry : fields) {
- const DexFile::FieldId& field_id = dex_file.GetFieldId(entry.GetDexFieldIndex());
+ const dex::FieldId& field_id = dex_file.GetFieldId(entry.GetDexFieldIndex());
StringPiece name(dex_file.StringDataByIdx(field_id.name_idx_));
StringPiece type(dex_file.StringDataByIdx(dex_file.GetTypeId(field_id.type_idx_).descriptor_idx_));
// Only use field_id.class_idx_ when the entry is unresolved, which is rare.
@@ -1011,7 +1011,7 @@
}
static std::string GetMethodDescription(const DexFile& dex_file, uint32_t index) {
- const DexFile::MethodId& method_id = dex_file.GetMethodId(index);
+ const dex::MethodId& method_id = dex_file.GetMethodId(index);
return std::string(dex_file.GetMethodDeclaringClassDescriptor(method_id))
+ "->"
+ dex_file.GetMethodName(method_id)
@@ -1026,7 +1026,7 @@
PointerSize pointer_size = class_linker->GetImagePointerSize();
for (const auto& entry : methods) {
- const DexFile::MethodId& method_id = dex_file.GetMethodId(entry.GetDexMethodIndex());
+ const dex::MethodId& method_id = dex_file.GetMethodId(entry.GetDexMethodIndex());
const char* name = dex_file.GetMethodName(method_id);
const Signature signature = dex_file.GetMethodSignature(method_id);
diff --git a/test/1919-vminit-thread-start-timing/src/art/Test1919.java b/test/1919-vminit-thread-start-timing/src/art/Test1919.java
index 3d5c079..f6b770f 100644
--- a/test/1919-vminit-thread-start-timing/src/art/Test1919.java
+++ b/test/1919-vminit-thread-start-timing/src/art/Test1919.java
@@ -21,10 +21,12 @@
public static void run() {
for (Event e : getEvents()) {
- if (PRINT_ALL_THREADS ||
- e.thr.equals(Thread.currentThread()) ||
- e.thr.getName().equals("JVMTI_THREAD-Test1919")) {
- System.out.println(e.name + ": " + e.thr.getName());
+ if (e.thr != null) {
+ if (PRINT_ALL_THREADS ||
+ e.thr.equals(Thread.currentThread()) ||
+ e.thr.getName().equals("JVMTI_THREAD-Test1919")) {
+ System.out.println(e.name + ": " + e.thr.getName());
+ }
}
}
}
diff --git a/test/580-crc32/src/Main.java b/test/580-crc32/src/Main.java
index 6199e9b..dfc0b3c 100644
--- a/test/580-crc32/src/Main.java
+++ b/test/580-crc32/src/Main.java
@@ -16,6 +16,7 @@
import java.util.zip.CRC32;
import java.util.Random;
+import java.nio.ByteBuffer;
/**
* The ART compiler can use intrinsics for the java.util.zip.CRC32 methods:
@@ -343,8 +344,193 @@
CRC32ByteArray(bytes, off, len));
}
+ private static long CRC32ByteBuffer(byte[] bytes, int off, int len) {
+ ByteBuffer buf = ByteBuffer.wrap(bytes, 0, off + len);
+ buf.position(off);
+ CRC32 crc32 = new CRC32();
+ crc32.update(buf);
+ return crc32.getValue();
+ }
+
+ private static void TestCRC32UpdateByteBuffer() {
+ assertEqual(0L, CRC32ByteBuffer(new byte[] {}, 0, 0));
+ assertEqual(0L, CRC32ByteBuffer(new byte[] {0}, 0, 0));
+ assertEqual(0L, CRC32ByteBuffer(new byte[] {0}, 1, 0));
+ assertEqual(0L, CRC32ByteBuffer(new byte[] {0, 0}, 1, 0));
+
+ assertEqual(CRC32Byte(0), CRC32ByteBuffer(new byte[] {0}, 0, 1));
+ assertEqual(CRC32Byte(1), CRC32ByteBuffer(new byte[] {1}, 0, 1));
+ assertEqual(CRC32Byte(0x0f), CRC32ByteBuffer(new byte[] {0x0f}, 0, 1));
+ assertEqual(CRC32Byte(0xff), CRC32ByteBuffer(new byte[] {-1}, 0, 1));
+ assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+ CRC32ByteBuffer(new byte[] {0, 0, 0}, 0, 3));
+ assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+ CRC32ByteBuffer(new byte[] {1, 1, 1}, 0, 3));
+ assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+ CRC32ByteBuffer(new byte[] {0x0f, 0x0f, 0x0f}, 0, 3));
+ assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+ CRC32ByteBuffer(new byte[] {-1, -1, -1}, 0, 3));
+ assertEqual(CRC32BytesUsingUpdateInt(1, 2),
+ CRC32ByteBuffer(new byte[] {1, 2}, 0, 2));
+ assertEqual(
+ CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+ CRC32ByteBuffer(new byte[] {0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE}, 0, 4));
+
+ byte[] bytes = new byte[128 * 1024];
+ Random rnd = new Random(0);
+ rnd.nextBytes(bytes);
+
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, 8 * 1024),
+ CRC32ByteBuffer(bytes, 0, 8 * 1024));
+
+ int off = rnd.nextInt(bytes.length / 2);
+ for (int len = 0; len <= 16; ++len) {
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+ CRC32ByteBuffer(bytes, off, len));
+ }
+
+ // Check there are no issues with unaligned accesses.
+ for (int o = 1; o < 8; ++o) {
+ for (int l = 0; l <= 16; ++l) {
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, o, l),
+ CRC32ByteBuffer(bytes, o, l));
+ }
+ }
+
+ int len = bytes.length / 2;
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len - 1),
+ CRC32ByteBuffer(bytes, 0, len - 1));
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len),
+ CRC32ByteBuffer(bytes, 0, len));
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len + 1),
+ CRC32ByteBuffer(bytes, 0, len + 1));
+
+ len = rnd.nextInt(bytes.length + 1);
+ off = rnd.nextInt(bytes.length - len);
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+ CRC32ByteBuffer(bytes, off, len));
+ }
+
+ private static long CRC32DirectByteBuffer(byte[] bytes, int off, int len) {
+ final int total_len = off + len;
+ ByteBuffer buf = ByteBuffer.allocateDirect(total_len).put(bytes, 0, total_len);
+ buf.position(off);
+ CRC32 crc32 = new CRC32();
+ crc32.update(buf);
+ return crc32.getValue();
+ }
+
+ private static long CRC32ByteAndDirectByteBuffer(int value, byte[] bytes) {
+ ByteBuffer buf = ByteBuffer.allocateDirect(bytes.length).put(bytes);
+ buf.position(0);
+ CRC32 crc32 = new CRC32();
+ crc32.update(value);
+ crc32.update(buf);
+ return crc32.getValue();
+ }
+
+ private static long CRC32DirectByteBufferAndByte(byte[] bytes, int value) {
+ ByteBuffer buf = ByteBuffer.allocateDirect(bytes.length).put(bytes);
+ buf.position(0);
+ CRC32 crc32 = new CRC32();
+ crc32.update(buf);
+ crc32.update(value);
+ return crc32.getValue();
+ }
+
+ private static void TestCRC32UpdateDirectByteBuffer() {
+ assertEqual(0L, CRC32DirectByteBuffer(new byte[] {}, 0, 0));
+ assertEqual(0L, CRC32DirectByteBuffer(new byte[] {0}, 0, 0));
+ assertEqual(0L, CRC32DirectByteBuffer(new byte[] {0}, 1, 0));
+ assertEqual(0L, CRC32DirectByteBuffer(new byte[] {0, 0}, 1, 0));
+
+ assertEqual(CRC32Byte(0), CRC32DirectByteBuffer(new byte[] {0}, 0, 1));
+ assertEqual(CRC32Byte(1), CRC32DirectByteBuffer(new byte[] {1}, 0, 1));
+ assertEqual(CRC32Byte(0x0f), CRC32DirectByteBuffer(new byte[] {0x0f}, 0, 1));
+ assertEqual(CRC32Byte(0xff), CRC32DirectByteBuffer(new byte[] {-1}, 0, 1));
+ assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+ CRC32DirectByteBuffer(new byte[] {0, 0, 0}, 0, 3));
+ assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+ CRC32DirectByteBuffer(new byte[] {1, 1, 1}, 0, 3));
+ assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+ CRC32DirectByteBuffer(new byte[] {0x0f, 0x0f, 0x0f}, 0, 3));
+ assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+ CRC32DirectByteBuffer(new byte[] {-1, -1, -1}, 0, 3));
+ assertEqual(CRC32BytesUsingUpdateInt(1, 2),
+ CRC32DirectByteBuffer(new byte[] {1, 2}, 0, 2));
+ assertEqual(
+ CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+ CRC32DirectByteBuffer(new byte[] {0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE}, 0, 4));
+
+ assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+ CRC32ByteAndDirectByteBuffer(0, new byte[] {0, 0}));
+ assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+ CRC32ByteAndDirectByteBuffer(1, new byte[] {1, 1}));
+ assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+ CRC32ByteAndDirectByteBuffer(0x0f, new byte[] {0x0f, 0x0f}));
+ assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+ CRC32ByteAndDirectByteBuffer(-1, new byte[] {-1, -1}));
+ assertEqual(CRC32BytesUsingUpdateInt(1, 2, 3),
+ CRC32ByteAndDirectByteBuffer(1, new byte[] {2, 3}));
+ assertEqual(
+ CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+ CRC32ByteAndDirectByteBuffer(0, new byte[] {-1, Byte.MIN_VALUE, Byte.MAX_VALUE}));
+
+ assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+ CRC32DirectByteBufferAndByte(new byte[] {0, 0}, 0));
+ assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+ CRC32DirectByteBufferAndByte(new byte[] {1, 1}, 1));
+ assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+ CRC32DirectByteBufferAndByte(new byte[] {0x0f, 0x0f}, 0x0f));
+ assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+ CRC32DirectByteBufferAndByte(new byte[] {-1, -1}, -1));
+ assertEqual(CRC32BytesUsingUpdateInt(1, 2, 3),
+ CRC32DirectByteBufferAndByte(new byte[] {1, 2}, 3));
+ assertEqual(
+ CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+ CRC32DirectByteBufferAndByte(new byte[] {0, -1, Byte.MIN_VALUE}, Byte.MAX_VALUE));
+
+ byte[] bytes = new byte[128 * 1024];
+ Random rnd = new Random(0);
+ rnd.nextBytes(bytes);
+
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, bytes.length),
+ CRC32DirectByteBuffer(bytes, 0, bytes.length));
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, 8 * 1024),
+ CRC32DirectByteBuffer(bytes, 0, 8 * 1024));
+
+ int off = rnd.nextInt(bytes.length / 2);
+ for (int len = 0; len <= 16; ++len) {
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+ CRC32DirectByteBuffer(bytes, off, len));
+ }
+
+ // Check there are no issues with unaligned accesses.
+ for (int o = 1; o < 8; ++o) {
+ for (int l = 0; l <= 16; ++l) {
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, o, l),
+ CRC32DirectByteBuffer(bytes, o, l));
+ }
+ }
+
+ int len = bytes.length / 2;
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len - 1),
+ CRC32DirectByteBuffer(bytes, 0, len - 1));
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len),
+ CRC32DirectByteBuffer(bytes, 0, len));
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len + 1),
+ CRC32DirectByteBuffer(bytes, 0, len + 1));
+
+ len = rnd.nextInt(bytes.length + 1);
+ off = rnd.nextInt(bytes.length - len);
+ assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+ CRC32DirectByteBuffer(bytes, off, len));
+ }
+
public static void main(String args[]) {
TestCRC32Update();
TestCRC32UpdateBytes();
+ TestCRC32UpdateByteBuffer();
+ TestCRC32UpdateDirectByteBuffer();
}
}
diff --git a/test/911-get-stack-trace/expected.txt b/test/911-get-stack-trace/expected.txt
index 8dd49aa..3179424 100644
--- a/test/911-get-stack-trace/expected.txt
+++ b/test/911-get-stack-trace/expected.txt
@@ -388,7 +388,7 @@
Test911
getAllStackTraces (I)[[Ljava/lang/Object; -1 -2
printAll (I)V 0 75
- doTest ()V 118 59
+ doTest ()V 120 59
run ()V 24 37
---------
@@ -643,7 +643,7 @@
Test911
getAllStackTraces (I)[[Ljava/lang/Object; -1 -2
printAll (I)V 0 75
- doTest ()V 123 61
+ doTest ()V 125 61
run ()V 24 37
---------
@@ -675,7 +675,7 @@
Test911
getThreadListStackTraces ([Ljava/lang/Thread;I)[[Ljava/lang/Object; -1 -2
printList ([Ljava/lang/Thread;I)V 0 68
- doTest ()V 108 54
+ doTest ()V 110 54
run ()V 32 41
---------
@@ -732,7 +732,7 @@
Test911
getThreadListStackTraces ([Ljava/lang/Thread;I)[[Ljava/lang/Object; -1 -2
printList ([Ljava/lang/Thread;I)V 0 68
- doTest ()V 113 56
+ doTest ()V 115 56
run ()V 32 41
---------
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 879f2fd..983c16a 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -6,6 +6,12 @@
"bug": "http://b/33389022"
},
{
+ "tests": "132-daemon-locks-shutdown",
+ "description": ["This test seems to fail occasionally on redefine-stress for unknown reasons without stack-traces"],
+ "variant": "redefine-stress",
+ "bug": "http://b/121302864"
+ },
+ {
"tests": "579-inline-infinite",
"description": ["This test seems to fail often on redefine-stress for unknown reasons"],
"variant": "redefine-stress",
diff --git a/tools/bootjars.sh b/tools/bootjars.sh
index 9f22827..320d4b5 100755
--- a/tools/bootjars.sh
+++ b/tools/bootjars.sh
@@ -75,7 +75,7 @@
# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
# because that's what we use for compiling the core.art image.
# It may contain additional modules from TEST_CORE_JARS.
- core_jars_list="core-oj core-libart core-simple"
+ core_jars_list="core-oj core-libart core-simple okhttp bouncycastle apache-xml conscrypt"
core_jars_suffix=
if [[ $mode == target ]]; then
core_jars_suffix=-testdex
diff --git a/tools/build_linux_bionic.sh b/tools/build_linux_bionic.sh
index 94ccc41..d3c1912 100755
--- a/tools/build_linux_bionic.sh
+++ b/tools/build_linux_bionic.sh
@@ -42,6 +42,7 @@
out_dir=$(get_build_var OUT_DIR)
host_out=$(get_build_var HOST_OUT)
+mk_product_out=$(get_build_var PRODUCT_OUT)
# TODO(b/31559095) Figure out a better way to do this.
#
@@ -52,6 +53,12 @@
cat $out_dir/soong/soong.variables > ${tmp_soong_var}
build/soong/soong_ui.bash --make-mode clean
mkdir -p $out_dir/soong
+mkdir -p $mk_product_out
+
+# TODO(b/31559095) Soong will panic if this file isn't present. It contains
+# information from MAKE needed to let soong handle the invocation of dex2oat.
+# This would be great to have but for now isn't needed.
+echo "{}" > $mk_product_out/dexpreopt.config
python3 <<END - ${tmp_soong_var} ${out_dir}/soong/soong.variables
import json
diff --git a/tools/dexanalyze/dexanalyze_bytecode.cc b/tools/dexanalyze/dexanalyze_bytecode.cc
index 88db672..ae88f37 100644
--- a/tools/dexanalyze/dexanalyze_bytecode.cc
+++ b/tools/dexanalyze/dexanalyze_bytecode.cc
@@ -360,7 +360,7 @@
case Instruction::INVOKE_INTERFACE:
case Instruction::INVOKE_SUPER: {
const uint32_t method_idx = DexMethodIndex(inst.Inst());
- const DexFile::MethodId& method = dex_file.GetMethodId(method_idx);
+ const dex::MethodId& method = dex_file.GetMethodId(method_idx);
const dex::TypeIndex receiver_type = method.class_idx_;
if (Enabled(kExperimentInvoke)) {
if (count_types) {
diff --git a/tools/dexanalyze/dexanalyze_bytecode.h b/tools/dexanalyze/dexanalyze_bytecode.h
index 015801f..da4249d 100644
--- a/tools/dexanalyze/dexanalyze_bytecode.h
+++ b/tools/dexanalyze/dexanalyze_bytecode.h
@@ -24,6 +24,7 @@
#include "base/safe_map.h"
#include "dexanalyze_experiments.h"
#include "dex/code_item_accessors.h"
+#include "dex/dex_file_types.h"
namespace art {
namespace dexanalyze {
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index 3e38b97..6af822d 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -113,7 +113,7 @@
std::set<std::string> GetInterfaceDescriptors() const {
std::set<std::string> list;
- const DexFile::TypeList* ifaces = dex_file_.GetInterfacesList(GetClassDef());
+ const dex::TypeList* ifaces = dex_file_.GetInterfacesList(GetClassDef());
for (uint32_t i = 0; ifaces != nullptr && i < ifaces->Size(); ++i) {
list.insert(dex_file_.StringByTypeIdx(ifaces->GetTypeItem(i).type_idx_));
}
@@ -201,12 +201,12 @@
return down_cast<const ClassAccessor::Method&>(item_);
}
- inline const DexFile::MethodId& GetMethodId() const {
+ inline const dex::MethodId& GetMethodId() const {
DCHECK(IsMethod());
return item_.GetDexFile().GetMethodId(item_.GetIndex());
}
- inline const DexFile::FieldId& GetFieldId() const {
+ inline const dex::FieldId& GetFieldId() const {
DCHECK(!IsMethod());
return item_.GetDexFile().GetFieldId(item_.GetIndex());
}
@@ -665,7 +665,7 @@
}
// Find the old MapList, find its size.
- const DexFile::MapList* old_map = old_dex_.GetMapList();
+ const dex::MapList* old_map = old_dex_.GetMapList();
CHECK_LT(old_map->size_, std::numeric_limits<uint32_t>::max());
// Compute the size of the new dex file. We append the HiddenapiClassData,
@@ -674,7 +674,7 @@
<< "End of input dex file is not 4-byte aligned, possibly because its MapList is not "
<< "at the end of the file.";
size_t size_delta =
- RoundUp(hiddenapi_class_data_.size(), kMapListAlignment) + sizeof(DexFile::MapItem);
+ RoundUp(hiddenapi_class_data_.size(), kMapListAlignment) + sizeof(dex::MapItem);
size_t new_size = old_dex_.Size() + size_delta;
AllocateMemory(new_size);
@@ -742,7 +742,7 @@
// Load the location of header and map list before we start editing the file.
loaded_dex_header_ = const_cast<DexFile::Header*>(&loaded_dex_->GetHeader());
- loaded_dex_maplist_ = const_cast<DexFile::MapList*>(loaded_dex_->GetMapList());
+ loaded_dex_maplist_ = const_cast<dex::MapList*>(loaded_dex_->GetMapList());
}
DexFile::Header& GetHeader() const {
@@ -750,7 +750,7 @@
return *loaded_dex_header_;
}
- DexFile::MapList& GetMapList() const {
+ dex::MapList& GetMapList() const {
CHECK(loaded_dex_maplist_ != nullptr);
return *loaded_dex_maplist_;
}
@@ -804,16 +804,16 @@
InsertPadding(/* alignment= */ kMapListAlignment);
size_t new_map_offset = offset_;
- DexFile::MapList* map = Append(old_dex_.GetMapList(), old_dex_.GetMapList()->Size());
+ dex::MapList* map = Append(old_dex_.GetMapList(), old_dex_.GetMapList()->Size());
// Check last map entry is a pointer to itself.
- DexFile::MapItem& old_item = map->list_[map->size_ - 1];
+ dex::MapItem& old_item = map->list_[map->size_ - 1];
CHECK(old_item.type_ == DexFile::kDexTypeMapList);
CHECK_EQ(old_item.size_, 1u);
CHECK_EQ(old_item.offset_, GetHeader().map_off_);
// Create a new MapItem entry with new MapList details.
- DexFile::MapItem new_item;
+ dex::MapItem new_item;
new_item.type_ = old_item.type_;
new_item.unused_ = 0u; // initialize to ensure dex output is deterministic (b/119308882)
new_item.size_ = old_item.size_;
@@ -824,7 +824,7 @@
// Append a new MapItem and return its pointer.
map->size_++;
- Append(&new_item, sizeof(DexFile::MapItem));
+ Append(&new_item, sizeof(dex::MapItem));
// Change penultimate entry to point to metadata.
old_item.type_ = DexFile::kDexTypeHiddenapiClassData;
@@ -853,7 +853,7 @@
std::unique_ptr<const DexFile> loaded_dex_;
DexFile::Header* loaded_dex_header_;
- DexFile::MapList* loaded_dex_maplist_;
+ dex::MapList* loaded_dex_maplist_;
};
class HiddenApi final {
diff --git a/tools/hiddenapi/hiddenapi_test.cc b/tools/hiddenapi/hiddenapi_test.cc
index f10d3f4..2689eed 100644
--- a/tools/hiddenapi/hiddenapi_test.cc
+++ b/tools/hiddenapi/hiddenapi_test.cc
@@ -113,17 +113,17 @@
return ofs;
}
- const DexFile::ClassDef& FindClass(const char* desc, const DexFile& dex_file) {
- const DexFile::TypeId* type_id = dex_file.FindTypeId(desc);
+ const dex::ClassDef& FindClass(const char* desc, const DexFile& dex_file) {
+ const dex::TypeId* type_id = dex_file.FindTypeId(desc);
CHECK(type_id != nullptr) << "Could not find class " << desc;
- const DexFile::ClassDef* found = dex_file.FindClassDef(dex_file.GetIndexForTypeId(*type_id));
+ const dex::ClassDef* found = dex_file.FindClassDef(dex_file.GetIndexForTypeId(*type_id));
CHECK(found != nullptr) << "Could not find class " << desc;
return *found;
}
hiddenapi::ApiList GetFieldHiddenFlags(const char* name,
uint32_t expected_visibility,
- const DexFile::ClassDef& class_def,
+ const dex::ClassDef& class_def,
const DexFile& dex_file) {
ClassAccessor accessor(dex_file, class_def, /* parse hiddenapi flags */ true);
CHECK(accessor.HasClassData()) << "Class " << accessor.GetDescriptor() << " has no data";
@@ -133,7 +133,7 @@
}
for (const ClassAccessor::Field& field : accessor.GetFields()) {
- const DexFile::FieldId& fid = dex_file.GetFieldId(field.GetIndex());
+ const dex::FieldId& fid = dex_file.GetFieldId(field.GetIndex());
if (strcmp(name, dex_file.GetFieldName(fid)) == 0) {
const uint32_t actual_visibility = field.GetAccessFlags() & kAccVisibilityFlags;
CHECK_EQ(actual_visibility, expected_visibility)
@@ -150,7 +150,7 @@
hiddenapi::ApiList GetMethodHiddenFlags(const char* name,
uint32_t expected_visibility,
bool expected_native,
- const DexFile::ClassDef& class_def,
+ const dex::ClassDef& class_def,
const DexFile& dex_file) {
ClassAccessor accessor(dex_file, class_def, /* parse hiddenapi flags */ true);
CHECK(accessor.HasClassData()) << "Class " << accessor.GetDescriptor() << " has no data";
@@ -160,7 +160,7 @@
}
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
- const DexFile::MethodId& mid = dex_file.GetMethodId(method.GetIndex());
+ const dex::MethodId& mid = dex_file.GetMethodId(method.GetIndex());
if (strcmp(name, dex_file.GetMethodName(mid)) == 0) {
CHECK_EQ(expected_native, method.MemberIsNative())
<< "Method " << name << " in class " << accessor.GetDescriptor();
diff --git a/tools/veridex/flow_analysis.cc b/tools/veridex/flow_analysis.cc
index 1fca7e1..65f2363 100644
--- a/tools/veridex/flow_analysis.cc
+++ b/tools/veridex/flow_analysis.cc
@@ -131,15 +131,15 @@
RegisterValue VeriFlowAnalysis::GetReturnType(uint32_t method_index) {
const DexFile& dex_file = resolver_->GetDexFile();
- const DexFile::MethodId& method_id = dex_file.GetMethodId(method_index);
- const DexFile::ProtoId& proto_id = dex_file.GetMethodPrototype(method_id);
+ const dex::MethodId& method_id = dex_file.GetMethodId(method_index);
+ const dex::ProtoId& proto_id = dex_file.GetMethodPrototype(method_id);
VeriClass* cls = resolver_->GetVeriClass(proto_id.return_type_idx_);
return RegisterValue(RegisterSource::kMethod, DexFileReference(&dex_file, method_index), cls);
}
RegisterValue VeriFlowAnalysis::GetFieldType(uint32_t field_index) {
const DexFile& dex_file = resolver_->GetDexFile();
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+ const dex::FieldId& field_id = dex_file.GetFieldId(field_index);
VeriClass* cls = resolver_->GetVeriClass(field_id.type_idx_);
return RegisterValue(RegisterSource::kField, DexFileReference(&dex_file, field_index), cls);
}
@@ -716,7 +716,7 @@
RegisterValue obj = GetRegister(GetParameterAt(instruction, is_range, args, 0));
const VeriClass* cls = obj.GetType();
if (cls != nullptr && cls->GetClassDef() != nullptr) {
- const DexFile::ClassDef* def = cls->GetClassDef();
+ const dex::ClassDef* def = cls->GetClassDef();
return RegisterValue(
RegisterSource::kClass,
DexFileReference(&resolver_->GetDexFileOf(*cls), def->class_idx_.index_),
diff --git a/tools/veridex/hidden_api.cc b/tools/veridex/hidden_api.cc
index 6a04365..2af7b50 100644
--- a/tools/veridex/hidden_api.cc
+++ b/tools/veridex/hidden_api.cc
@@ -78,7 +78,7 @@
std::string HiddenApi::GetApiMethodName(const DexFile& dex_file, uint32_t method_index) {
std::stringstream ss;
- const DexFile::MethodId& method_id = dex_file.GetMethodId(method_index);
+ const dex::MethodId& method_id = dex_file.GetMethodId(method_index);
ss << dex_file.StringByTypeIdx(method_id.class_idx_)
<< "->"
<< dex_file.GetMethodName(method_id)
@@ -88,7 +88,7 @@
std::string HiddenApi::GetApiFieldName(const DexFile& dex_file, uint32_t field_index) {
std::stringstream ss;
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+ const dex::FieldId& field_id = dex_file.GetFieldId(field_index);
ss << dex_file.StringByTypeIdx(field_id.class_idx_)
<< "->"
<< dex_file.GetFieldName(field_id)
diff --git a/tools/veridex/resolver.cc b/tools/veridex/resolver.cc
index 56729ff..df097b6 100644
--- a/tools/veridex/resolver.cc
+++ b/tools/veridex/resolver.cc
@@ -19,6 +19,7 @@
#include "dex/class_accessor-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/primitive.h"
+#include "dex/signature-inl.h"
#include "hidden_api.h"
#include "veridex.h"
@@ -46,7 +47,7 @@
}
static bool HasSameNameAndSignature(const DexFile& dex_file,
- const DexFile::MethodId& method_id,
+ const dex::MethodId& method_id,
const char* method_name,
const char* type) {
return strcmp(method_name, dex_file.GetMethodName(method_id)) == 0 &&
@@ -54,7 +55,7 @@
}
static bool HasSameNameAndSignature(const DexFile& dex_file,
- const DexFile::MethodId& method_id,
+ const dex::MethodId& method_id,
const char* method_name,
const Signature& signature) {
return strcmp(method_name, dex_file.GetMethodName(method_id)) == 0 &&
@@ -62,7 +63,7 @@
}
static bool HasSameNameAndType(const DexFile& dex_file,
- const DexFile::FieldId& field_id,
+ const dex::FieldId& field_id,
const char* field_name,
const char* field_type) {
return strcmp(field_name, dex_file.GetFieldName(field_id)) == 0 &&
@@ -139,7 +140,7 @@
const DexFile& other_dex_file = resolver->dex_file_;
ClassAccessor other_dex_accessor(other_dex_file, *kls.GetClassDef());
for (const ClassAccessor::Method& method : other_dex_accessor.GetMethods()) {
- const DexFile::MethodId& other_method_id = other_dex_file.GetMethodId(method.GetIndex());
+ const dex::MethodId& other_method_id = other_dex_file.GetMethodId(method.GetIndex());
if (HasSameNameAndSignature(other_dex_file,
other_method_id,
method_name,
@@ -160,7 +161,7 @@
}
// Look at methods in `kls`'s interface hierarchy.
- const DexFile::TypeList* interfaces = other_dex_file.GetInterfacesList(*kls.GetClassDef());
+ const dex::TypeList* interfaces = other_dex_file.GetInterfacesList(*kls.GetClassDef());
if (interfaces != nullptr) {
for (size_t i = 0; i < interfaces->Size(); i++) {
dex::TypeIndex idx = interfaces->GetTypeItem(i).type_idx_;
@@ -194,7 +195,7 @@
const DexFile& other_dex_file = resolver->dex_file_;
ClassAccessor other_dex_accessor(other_dex_file, *kls.GetClassDef());
for (const ClassAccessor::Field& field : other_dex_accessor.GetFields()) {
- const DexFile::FieldId& other_field_id = other_dex_file.GetFieldId(field.GetIndex());
+ const dex::FieldId& other_field_id = other_dex_file.GetFieldId(field.GetIndex());
if (HasSameNameAndType(other_dex_file,
other_field_id,
field_name,
@@ -204,7 +205,7 @@
}
// Look at fields in `kls`'s interface hierarchy.
- const DexFile::TypeList* interfaces = other_dex_file.GetInterfacesList(*kls.GetClassDef());
+ const dex::TypeList* interfaces = other_dex_file.GetInterfacesList(*kls.GetClassDef());
if (interfaces != nullptr) {
for (size_t i = 0; i < interfaces->Size(); i++) {
dex::TypeIndex idx = interfaces->GetTypeItem(i).type_idx_;
@@ -258,7 +259,7 @@
VeriMethod method_info = method_infos_[method_index];
if (method_info == nullptr) {
// Method is defined in another dex file.
- const DexFile::MethodId& method_id = dex_file_.GetMethodId(method_index);
+ const dex::MethodId& method_id = dex_file_.GetMethodId(method_index);
VeriClass* kls = GetVeriClass(method_id.class_idx_);
if (kls == nullptr) {
return nullptr;
@@ -276,7 +277,7 @@
VeriField field_info = field_infos_[field_index];
if (field_info == nullptr) {
// Field is defined in another dex file.
- const DexFile::FieldId& field_id = dex_file_.GetFieldId(field_index);
+ const dex::FieldId& field_id = dex_file_.GetFieldId(field_index);
VeriClass* kls = GetVeriClass(field_id.class_idx_);
if (kls == nullptr) {
return nullptr;
diff --git a/tools/veridex/veridex.h b/tools/veridex/veridex.h
index e0d8261..f02de96 100644
--- a/tools/veridex/veridex.h
+++ b/tools/veridex/veridex.h
@@ -19,11 +19,14 @@
#include <map>
-#include "dex/dex_file.h"
#include "dex/primitive.h"
namespace art {
+namespace dex {
+struct ClassDef;
+} // namespace dex
+
static int gTargetSdkVersion = 1000; // Will be initialized after parsing options.
/**
@@ -45,7 +48,7 @@
class VeriClass {
public:
VeriClass() = default;
- VeriClass(Primitive::Type k, uint8_t dims, const DexFile::ClassDef* cl)
+ VeriClass(Primitive::Type k, uint8_t dims, const dex::ClassDef* cl)
: kind_(k), dimensions_(dims), class_def_(cl) {}
bool IsUninitialized() const {
@@ -62,7 +65,7 @@
Primitive::Type GetKind() const { return kind_; }
uint8_t GetDimensions() const { return dimensions_; }
- const DexFile::ClassDef* GetClassDef() const { return class_def_; }
+ const dex::ClassDef* GetClassDef() const { return class_def_; }
static VeriClass* object_;
static VeriClass* class_;
@@ -92,7 +95,7 @@
private:
Primitive::Type kind_;
uint8_t dimensions_;
- const DexFile::ClassDef* class_def_;
+ const dex::ClassDef* class_def_;
};
inline bool IsGetMethod(VeriMethod method) {