Merge "Avoid crash in StringReference.Value JDWP command" into lmp-dev
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 9ee3b69..a7d852b 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -76,6 +76,7 @@
runtime/base/histogram_test.cc \
runtime/base/mutex_test.cc \
runtime/base/scoped_flock_test.cc \
+ runtime/base/stringprintf_test.cc \
runtime/base/timing_logger_test.cc \
runtime/base/unix_file/fd_file_test.cc \
runtime/base/unix_file/mapped_file_test.cc \
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6c614a3..fa2a560 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -608,7 +608,7 @@
Resolve(class_loader, dex_files, thread_pool, timings);
if (!compiler_options_->IsVerificationEnabled()) {
- VLOG(compiler) << "Verify none mode specified, skipping verification.";
+ LOG(INFO) << "Verify none mode specified, skipping verification.";
SetVerified(class_loader, dex_files, thread_pool, timings);
return;
}
@@ -1758,8 +1758,11 @@
ClassReference ref(manager->GetDexFile(), class_def_index);
manager->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
}
+ } else {
+ Thread* self = soa.Self();
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
}
- soa.Self()->AssertNoPendingException();
}
void CompilerDriver::SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file,
diff --git a/compiler/elf_fixup.cc b/compiler/elf_fixup.cc
index 60f76ef..0155c82 100644
--- a/compiler/elf_fixup.cc
+++ b/compiler/elf_fixup.cc
@@ -85,17 +85,18 @@
bool ElfFixup::FixupSectionHeaders(ElfFile& elf_file, uintptr_t base_address) {
for (Elf32_Word i = 0; i < elf_file.GetSectionHeaderNum(); i++) {
- Elf32_Shdr& sh = elf_file.GetSectionHeader(i);
+ Elf32_Shdr* sh = elf_file.GetSectionHeader(i);
+ CHECK(sh != nullptr);
// 0 implies that the section will not exist in the memory of the process
- if (sh.sh_addr == 0) {
+ if (sh->sh_addr == 0) {
continue;
}
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf32_Shdr[%d] from 0x%08x to 0x%08" PRIxPTR,
elf_file.GetFile().GetPath().c_str(), i,
- sh.sh_addr, sh.sh_addr + base_address);
+ sh->sh_addr, sh->sh_addr + base_address);
}
- sh.sh_addr += base_address;
+ sh->sh_addr += base_address;
}
return true;
}
@@ -103,18 +104,19 @@
bool ElfFixup::FixupProgramHeaders(ElfFile& elf_file, uintptr_t base_address) {
// TODO: ELFObjectFile doesn't have give to Elf32_Phdr, so we do that ourselves for now.
for (Elf32_Word i = 0; i < elf_file.GetProgramHeaderNum(); i++) {
- Elf32_Phdr& ph = elf_file.GetProgramHeader(i);
- CHECK_EQ(ph.p_vaddr, ph.p_paddr) << elf_file.GetFile().GetPath() << " i=" << i;
- CHECK((ph.p_align == 0) || (0 == ((ph.p_vaddr - ph.p_offset) & (ph.p_align - 1))))
+ Elf32_Phdr* ph = elf_file.GetProgramHeader(i);
+ CHECK(ph != nullptr);
+ CHECK_EQ(ph->p_vaddr, ph->p_paddr) << elf_file.GetFile().GetPath() << " i=" << i;
+ CHECK((ph->p_align == 0) || (0 == ((ph->p_vaddr - ph->p_offset) & (ph->p_align - 1))))
<< elf_file.GetFile().GetPath() << " i=" << i;
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf32_Phdr[%d] from 0x%08x to 0x%08" PRIxPTR,
elf_file.GetFile().GetPath().c_str(), i,
- ph.p_vaddr, ph.p_vaddr + base_address);
+ ph->p_vaddr, ph->p_vaddr + base_address);
}
- ph.p_vaddr += base_address;
- ph.p_paddr += base_address;
- CHECK((ph.p_align == 0) || (0 == ((ph.p_vaddr - ph.p_offset) & (ph.p_align - 1))))
+ ph->p_vaddr += base_address;
+ ph->p_paddr += base_address;
+ CHECK((ph->p_align == 0) || (0 == ((ph->p_vaddr - ph->p_offset) & (ph->p_align - 1))))
<< elf_file.GetFile().GetPath() << " i=" << i;
}
return true;
@@ -124,20 +126,21 @@
Elf32_Word section_type = dynamic ? SHT_DYNSYM : SHT_SYMTAB;
// TODO: Unfortunate ELFObjectFile has protected symbol access, so use ElfFile
Elf32_Shdr* symbol_section = elf_file.FindSectionByType(section_type);
- if (symbol_section == NULL) {
+ if (symbol_section == nullptr) {
// file is missing optional .symtab
CHECK(!dynamic) << elf_file.GetFile().GetPath();
return true;
}
for (uint32_t i = 0; i < elf_file.GetSymbolNum(*symbol_section); i++) {
- Elf32_Sym& symbol = elf_file.GetSymbol(section_type, i);
- if (symbol.st_value != 0) {
+ Elf32_Sym* symbol = elf_file.GetSymbol(section_type, i);
+ CHECK(symbol != nullptr);
+ if (symbol->st_value != 0) {
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf32_Sym[%d] from 0x%08x to 0x%08" PRIxPTR,
elf_file.GetFile().GetPath().c_str(), i,
- symbol.st_value, symbol.st_value + base_address);
+ symbol->st_value, symbol->st_value + base_address);
}
- symbol.st_value += base_address;
+ symbol->st_value += base_address;
}
}
return true;
@@ -145,10 +148,11 @@
bool ElfFixup::FixupRelocations(ElfFile& elf_file, uintptr_t base_address) {
for (Elf32_Word i = 0; i < elf_file.GetSectionHeaderNum(); i++) {
- Elf32_Shdr& sh = elf_file.GetSectionHeader(i);
- if (sh.sh_type == SHT_REL) {
- for (uint32_t i = 0; i < elf_file.GetRelNum(sh); i++) {
- Elf32_Rel& rel = elf_file.GetRel(sh, i);
+ Elf32_Shdr* sh = elf_file.GetSectionHeader(i);
+ CHECK(sh != nullptr);
+ if (sh->sh_type == SHT_REL) {
+ for (uint32_t i = 0; i < elf_file.GetRelNum(*sh); i++) {
+ Elf32_Rel& rel = elf_file.GetRel(*sh, i);
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf32_Rel[%d] from 0x%08x to 0x%08" PRIxPTR,
elf_file.GetFile().GetPath().c_str(), i,
@@ -156,9 +160,9 @@
}
rel.r_offset += base_address;
}
- } else if (sh.sh_type == SHT_RELA) {
- for (uint32_t i = 0; i < elf_file.GetRelaNum(sh); i++) {
- Elf32_Rela& rela = elf_file.GetRela(sh, i);
+ } else if (sh->sh_type == SHT_RELA) {
+ for (uint32_t i = 0; i < elf_file.GetRelaNum(*sh); i++) {
+ Elf32_Rela& rela = elf_file.GetRela(*sh, i);
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf32_Rela[%d] from 0x%08x to 0x%08" PRIxPTR,
elf_file.GetFile().GetPath().c_str(), i,
diff --git a/compiler/elf_patcher.cc b/compiler/elf_patcher.cc
index ea4517f..e4c957a 100644
--- a/compiler/elf_patcher.cc
+++ b/compiler/elf_patcher.cc
@@ -273,7 +273,7 @@
<< "We got more patches than anticipated";
CHECK_LE(reinterpret_cast<uintptr_t>(elf_file_->Begin()) + shdr->sh_offset + shdr->sh_size,
reinterpret_cast<uintptr_t>(elf_file_->End())) << "section is too large";
- CHECK(shdr == &elf_file_->GetSectionHeader(elf_file_->GetSectionHeaderNum() - 1) ||
+ CHECK(shdr == elf_file_->GetSectionHeader(elf_file_->GetSectionHeaderNum() - 1) ||
shdr->sh_offset + shdr->sh_size <= (shdr + 1)->sh_offset)
<< "Section overlaps onto next section";
// It's mmap'd so we can just memcpy.
diff --git a/compiler/elf_stripper.cc b/compiler/elf_stripper.cc
index 0b86ad0..457d8a0 100644
--- a/compiler/elf_stripper.cc
+++ b/compiler/elf_stripper.cc
@@ -72,13 +72,15 @@
section_headers.reserve(elf_file->GetSectionHeaderNum());
- Elf32_Shdr& string_section = elf_file->GetSectionNameStringSection();
+ Elf32_Shdr* string_section = elf_file->GetSectionNameStringSection();
+ CHECK(string_section != nullptr);
for (Elf32_Word i = 0; i < elf_file->GetSectionHeaderNum(); i++) {
- Elf32_Shdr& sh = elf_file->GetSectionHeader(i);
- const char* name = elf_file->GetString(string_section, sh.sh_name);
- if (name == NULL) {
+ Elf32_Shdr* sh = elf_file->GetSectionHeader(i);
+ CHECK(sh != nullptr);
+ const char* name = elf_file->GetString(*string_section, sh->sh_name);
+ if (name == nullptr) {
CHECK_EQ(0U, i);
- section_headers.push_back(sh);
+ section_headers.push_back(*sh);
section_headers_original_indexes.push_back(0);
continue;
}
@@ -87,32 +89,34 @@
|| (strcmp(name, ".symtab") == 0)) {
continue;
}
- section_headers.push_back(sh);
+ section_headers.push_back(*sh);
section_headers_original_indexes.push_back(i);
}
CHECK_NE(0U, section_headers.size());
CHECK_EQ(section_headers.size(), section_headers_original_indexes.size());
// section 0 is the NULL section, sections start at offset of first section
- Elf32_Off offset = elf_file->GetSectionHeader(1).sh_offset;
+ CHECK(elf_file->GetSectionHeader(1) != nullptr);
+ Elf32_Off offset = elf_file->GetSectionHeader(1)->sh_offset;
for (size_t i = 1; i < section_headers.size(); i++) {
Elf32_Shdr& new_sh = section_headers[i];
- Elf32_Shdr& old_sh = elf_file->GetSectionHeader(section_headers_original_indexes[i]);
- CHECK_EQ(new_sh.sh_name, old_sh.sh_name);
- if (old_sh.sh_addralign > 1) {
- offset = RoundUp(offset, old_sh.sh_addralign);
+ Elf32_Shdr* old_sh = elf_file->GetSectionHeader(section_headers_original_indexes[i]);
+ CHECK(old_sh != nullptr);
+ CHECK_EQ(new_sh.sh_name, old_sh->sh_name);
+ if (old_sh->sh_addralign > 1) {
+ offset = RoundUp(offset, old_sh->sh_addralign);
}
- if (old_sh.sh_offset == offset) {
+ if (old_sh->sh_offset == offset) {
// already in place
- offset += old_sh.sh_size;
+ offset += old_sh->sh_size;
continue;
}
// shift section earlier
memmove(elf_file->Begin() + offset,
- elf_file->Begin() + old_sh.sh_offset,
- old_sh.sh_size);
+ elf_file->Begin() + old_sh->sh_offset,
+ old_sh->sh_size);
new_sh.sh_offset = offset;
- offset += old_sh.sh_size;
+ offset += old_sh->sh_size;
}
Elf32_Off shoff = offset;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index bd39e67..e55e358 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -426,6 +426,11 @@
return nullptr;
}
+ // Flush result to disk. Patching code will re-open the file (mmap), so ensure that our view
+ // of the file already made it there and won't be re-ordered with writes from PatchOat or
+ // image patching.
+ oat_file->Flush();
+
if (!driver->IsImage() && driver->GetCompilerOptions().GetIncludePatchInformation()) {
t2.NewTiming("Patching ELF");
std::string error_msg;
@@ -1019,6 +1024,7 @@
include_debug_symbols = true;
} else if (option == "--no-include-debug-symbols" || option == "--strip-symbols") {
include_debug_symbols = false;
+ generate_gdb_information = false; // Depends on debug symbols, see above.
} else if (option.starts_with("--profile-file=")) {
profile_file = option.substr(strlen("--profile-file=")).data();
VLOG(compiler) << "dex2oat: profile file is " << profile_file;
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index 41ee213..c97bf64 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -19,6 +19,7 @@
#include <iostream>
#include "base/logging.h"
+#include "base/stringprintf.h"
#include "disassembler_arm.h"
#include "disassembler_arm64.h"
#include "disassembler_mips.h"
@@ -26,21 +27,30 @@
namespace art {
-Disassembler* Disassembler::Create(InstructionSet instruction_set) {
+Disassembler* Disassembler::Create(InstructionSet instruction_set, DisassemblerOptions* options) {
if (instruction_set == kArm || instruction_set == kThumb2) {
- return new arm::DisassemblerArm();
+ return new arm::DisassemblerArm(options);
} else if (instruction_set == kArm64) {
- return new arm64::DisassemblerArm64();
+ return new arm64::DisassemblerArm64(options);
} else if (instruction_set == kMips) {
- return new mips::DisassemblerMips();
+ return new mips::DisassemblerMips(options);
} else if (instruction_set == kX86) {
- return new x86::DisassemblerX86(false);
+ return new x86::DisassemblerX86(options, false);
} else if (instruction_set == kX86_64) {
- return new x86::DisassemblerX86(true);
+ return new x86::DisassemblerX86(options, true);
} else {
UNIMPLEMENTED(FATAL) << "no disassembler for " << instruction_set;
return NULL;
}
}
+std::string Disassembler::FormatInstructionPointer(const uint8_t* begin) {
+ if (disassembler_options_->absolute_addresses_) {
+ return StringPrintf("%p", begin);
+ } else {
+ size_t offset = begin - disassembler_options_->base_address_;
+ return StringPrintf("0x%08zx", offset);
+ }
+}
+
} // namespace art
diff --git a/disassembler/disassembler.h b/disassembler/disassembler.h
index 183e692..487f433 100644
--- a/disassembler/disassembler.h
+++ b/disassembler/disassembler.h
@@ -26,10 +26,31 @@
namespace art {
+class DisassemblerOptions {
+ public:
+ // Should the disassembler print absolute or relative addresses.
+ const bool absolute_addresses_;
+
+ // Base address for calculating relative code offsets when absolute_addresses_ is false.
+ const uint8_t* const base_address_;
+
+ DisassemblerOptions(bool absolute_addresses, const uint8_t* base_address)
+ : absolute_addresses_(absolute_addresses), base_address_(base_address) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DisassemblerOptions);
+};
+
class Disassembler {
public:
- static Disassembler* Create(InstructionSet instruction_set);
- virtual ~Disassembler() {}
+ // Creates a Disassembler for the given InstructionSet with the
+ // non-null DisassemblerOptions which become owned by the
+ // Disassembler.
+ static Disassembler* Create(InstructionSet instruction_set, DisassemblerOptions* options);
+
+ virtual ~Disassembler() {
+ delete disassembler_options_;
+ }
// Dump a single instruction returning the length of that instruction.
virtual size_t Dump(std::ostream& os, const uint8_t* begin) = 0;
@@ -37,9 +58,15 @@
virtual void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) = 0;
protected:
- Disassembler() {}
+ explicit Disassembler(DisassemblerOptions* disassembler_options)
+ : disassembler_options_(disassembler_options) {
+ CHECK(disassembler_options_ != nullptr);
+ }
+
+ std::string FormatInstructionPointer(const uint8_t* begin);
private:
+ DisassemblerOptions* disassembler_options_;
DISALLOW_COPY_AND_ASSIGN(Disassembler);
};
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 56023c1..54e7761 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -94,7 +94,7 @@
}
void DisassemblerArm::DumpBranchTarget(std::ostream& os, const uint8_t* instr_ptr, int32_t imm32) {
- os << StringPrintf("%+d (%p)", imm32, instr_ptr + imm32);
+ os << StringPrintf("%+d (", imm32) << FormatInstructionPointer(instr_ptr + imm32) << ")";
}
static uint32_t ReadU16(const uint8_t* ptr) {
@@ -356,7 +356,9 @@
opcode += kConditionCodeNames[cond];
opcode += suffixes;
// TODO: a more complete ARM disassembler could generate wider opcodes.
- os << StringPrintf("%p: %08x\t%-7s ", instr_ptr, instruction, opcode.c_str()) << args.str() << '\n';
+ os << FormatInstructionPointer(instr_ptr)
+ << StringPrintf(": %08x\t%-7s ", instruction, opcode.c_str())
+ << args.str() << '\n';
}
int32_t ThumbExpand(int32_t imm12) {
@@ -1608,7 +1610,9 @@
opcode << "UNKNOWN " << op2;
}
- os << StringPrintf("%p: %08x\t%-7s ", instr_ptr, instr, opcode.str().c_str()) << args.str() << '\n';
+ os << FormatInstructionPointer(instr_ptr)
+ << StringPrintf(": %08x\t%-7s ", instr, opcode.str().c_str())
+ << args.str() << '\n';
return 4;
} // NOLINT(readability/fn_size)
@@ -1936,7 +1940,9 @@
it_conditions_.pop_back();
}
- os << StringPrintf("%p: %04x \t%-7s ", instr_ptr, instr, opcode.str().c_str()) << args.str() << '\n';
+ os << FormatInstructionPointer(instr_ptr)
+ << StringPrintf(": %04x \t%-7s ", instr, opcode.str().c_str())
+ << args.str() << '\n';
}
return 2;
}
diff --git a/disassembler/disassembler_arm.h b/disassembler/disassembler_arm.h
index f6d7fda..f870e8e 100644
--- a/disassembler/disassembler_arm.h
+++ b/disassembler/disassembler_arm.h
@@ -26,8 +26,7 @@
class DisassemblerArm FINAL : public Disassembler {
public:
- DisassemblerArm() {
- }
+ explicit DisassemblerArm(DisassemblerOptions* options) : Disassembler(options) {}
size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index 864d22d..5d0c218 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -34,7 +34,8 @@
size_t DisassemblerArm64::Dump(std::ostream& os, const uint8_t* begin) {
uint32_t instruction = ReadU32(begin);
decoder.Decode(reinterpret_cast<vixl::Instruction*>(&instruction));
- os << StringPrintf("%p: %08x\t%s\n", begin, instruction, disasm.GetOutput());
+ os << FormatInstructionPointer(begin)
+ << StringPrintf(": %08x\t%s\n", instruction, disasm.GetOutput());
return vixl::kInstructionSize;
}
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index 28c0fa7..ad20c70 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -27,7 +27,7 @@
class DisassemblerArm64 FINAL : public Disassembler {
public:
- DisassemblerArm64() {
+ explicit DisassemblerArm64(DisassemblerOptions* options) : Disassembler(options) {
decoder.AppendVisitor(&disasm);
}
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 5e89f6f..bd5fac7 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -168,7 +168,7 @@
return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
}
-static void DumpMips(std::ostream& os, const uint8_t* instr_ptr) {
+size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
uint32_t instruction = ReadU32(instr_ptr);
uint32_t rs = (instruction >> 21) & 0x1f; // I-type, R-type.
@@ -197,7 +197,8 @@
int32_t offset = static_cast<int16_t>(instruction & 0xffff);
offset <<= 2;
offset += 4; // Delay slot.
- args << StringPrintf("%p ; %+d", instr_ptr + offset, offset);
+ args << FormatInstructionPointer(instr_ptr + offset)
+ << StringPrintf(" ; %+d", offset);
}
break;
case 'D': args << 'r' << rd; break;
@@ -254,17 +255,15 @@
}
}
- os << StringPrintf("%p: %08x\t%-7s ", instr_ptr, instruction, opcode.c_str()) << args.str() << '\n';
-}
-
-size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* begin) {
- DumpMips(os, begin);
+ os << FormatInstructionPointer(instr_ptr)
+ << StringPrintf(": %08x\t%-7s ", instruction, opcode.c_str())
+ << args.str() << '\n';
return 4;
}
void DisassemblerMips::Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) {
for (const uint8_t* cur = begin; cur < end; cur += 4) {
- DumpMips(os, cur);
+ Dump(os, cur);
}
}
diff --git a/disassembler/disassembler_mips.h b/disassembler/disassembler_mips.h
index e1fb034..00b2f8d 100644
--- a/disassembler/disassembler_mips.h
+++ b/disassembler/disassembler_mips.h
@@ -26,8 +26,7 @@
class DisassemblerMips FINAL : public Disassembler {
public:
- DisassemblerMips() {
- }
+ explicit DisassemblerMips(DisassemblerOptions* options) : Disassembler(options) {}
size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 0ca8962..4708498 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -1227,7 +1227,9 @@
displacement = *reinterpret_cast<const int32_t*>(instr);
instr += 4;
}
- args << StringPrintf("%+d (%p)", displacement, instr + displacement);
+ args << StringPrintf("%+d (", displacement)
+ << FormatInstructionPointer(instr + displacement)
+ << ")";
}
if (prefix[1] == kFs && !supports_rex_) {
args << " ; ";
@@ -1250,8 +1252,8 @@
default: LOG(FATAL) << "Unreachable";
}
prefixed_opcode << opcode.str();
- os << StringPrintf("%p: %22s \t%-7s ", begin_instr, hex.str().c_str(),
- prefixed_opcode.str().c_str())
+ os << FormatInstructionPointer(begin_instr)
+ << StringPrintf(": %22s \t%-7s ", hex.str().c_str(), prefixed_opcode.str().c_str())
<< args.str() << '\n';
return instr - begin_instr;
} // NOLINT(readability/fn_size)
diff --git a/disassembler/disassembler_x86.h b/disassembler/disassembler_x86.h
index 2565bb1..f448662 100644
--- a/disassembler/disassembler_x86.h
+++ b/disassembler/disassembler_x86.h
@@ -24,8 +24,8 @@
class DisassemblerX86 FINAL : public Disassembler {
public:
- explicit DisassemblerX86(bool supports_rex) : supports_rex_(supports_rex) {
- }
+ DisassemblerX86(DisassemblerOptions* options, bool supports_rex)
+ : Disassembler(options), supports_rex_(supports_rex) {}
size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 0065a1f..7607bf0 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -76,8 +76,8 @@
" Example: --boot-image=/system/framework/boot.art\n"
"\n");
fprintf(stderr,
- " --instruction-set=(arm|arm64|mips|x86|x86_64): for locating the image file based on the image location\n"
- " set.\n"
+ " --instruction-set=(arm|arm64|mips|x86|x86_64): for locating the image\n"
+ " file based on the image location set.\n"
" Example: --instruction-set=x86\n"
" Default: %s\n"
"\n",
@@ -87,9 +87,20 @@
" Example: --output=/tmp/oatdump.txt\n"
"\n");
fprintf(stderr,
- " --dump:[raw_mapping_table|raw_gc_map]\n"
- " Example: --dump:raw_gc_map\n"
- " Default: neither\n"
+ " --dump:raw_mapping_table enables dumping of the mapping table.\n"
+ " Example: --dump:raw_mapping_table\n"
+ "\n");
+ fprintf(stderr,
+ " --dump:raw_mapping_table enables dumping of the GC map.\n"
+ " Example: --dump:raw_gc_map\n"
+ "\n");
+ fprintf(stderr,
+ " --no-dump:vmap may be used to disable vmap dumping.\n"
+ " Example: --no-dump:vmap\n"
+ "\n");
+ fprintf(stderr,
+ " --no-disassemble may be used to disable disassembly.\n"
+ " Example: --no-disassemble\n"
"\n");
exit(EXIT_FAILURE);
}
@@ -105,18 +116,45 @@
"kClassRoots",
};
+class OatDumperOptions {
+ public:
+ OatDumperOptions(bool dump_raw_mapping_table,
+ bool dump_raw_gc_map,
+ bool dump_vmap,
+ bool disassemble_code,
+ bool absolute_addresses)
+ : dump_raw_mapping_table_(dump_raw_mapping_table),
+ dump_raw_gc_map_(dump_raw_gc_map),
+ dump_vmap_(dump_vmap),
+ disassemble_code_(disassemble_code),
+ absolute_addresses_(absolute_addresses) {}
+
+ const bool dump_raw_mapping_table_;
+ const bool dump_raw_gc_map_;
+ const bool dump_vmap_;
+ const bool disassemble_code_;
+ const bool absolute_addresses_;
+};
+
class OatDumper {
public:
- explicit OatDumper(const OatFile& oat_file, bool dump_raw_mapping_table, bool dump_raw_gc_map)
+ explicit OatDumper(const OatFile& oat_file, OatDumperOptions* options)
: oat_file_(oat_file),
oat_dex_files_(oat_file.GetOatDexFiles()),
- dump_raw_mapping_table_(dump_raw_mapping_table),
- dump_raw_gc_map_(dump_raw_gc_map),
- disassembler_(Disassembler::Create(oat_file_.GetOatHeader().GetInstructionSet())) {
+ options_(options),
+ disassembler_(Disassembler::Create(oat_file_.GetOatHeader().GetInstructionSet(),
+ new DisassemblerOptions(options_->absolute_addresses_,
+ oat_file.Begin()))) {
AddAllOffsets();
}
- void Dump(std::ostream& os) {
+ ~OatDumper() {
+ delete options_;
+ delete disassembler_;
+ }
+
+ bool Dump(std::ostream& os) {
+ bool success = true;
const OatHeader& oat_header = oat_file_.GetOatHeader();
os << "MAGIC:\n";
@@ -137,7 +175,7 @@
#define DUMP_OAT_HEADER_OFFSET(label, offset) \
os << label " OFFSET:\n"; \
os << StringPrintf("0x%08x", oat_header.offset()); \
- if (oat_header.offset() != 0) { \
+ if (oat_header.offset() != 0 && options_->absolute_addresses_) { \
os << StringPrintf(" (%p)", oat_file_.Begin() + oat_header.offset()); \
} \
os << StringPrintf("\n\n");
@@ -165,7 +203,10 @@
GetQuickToInterpreterBridgeOffset);
#undef DUMP_OAT_HEADER_OFFSET
- os << "IMAGE PATCH DELTA:\n" << oat_header.GetImagePatchDelta();
+ os << "IMAGE PATCH DELTA:\n";
+ os << StringPrintf("%d (0x%08x)\n\n",
+ oat_header.GetImagePatchDelta(),
+ oat_header.GetImagePatchDelta());
os << "IMAGE FILE LOCATION OAT CHECKSUM:\n";
os << StringPrintf("0x%08x\n\n", oat_header.GetImageFileLocationOatChecksum());
@@ -186,19 +227,28 @@
os << "\n";
}
- os << "BEGIN:\n";
- os << reinterpret_cast<const void*>(oat_file_.Begin()) << "\n\n";
+ if (options_->absolute_addresses_) {
+ os << "BEGIN:\n";
+ os << reinterpret_cast<const void*>(oat_file_.Begin()) << "\n\n";
- os << "END:\n";
- os << reinterpret_cast<const void*>(oat_file_.End()) << "\n\n";
+ os << "END:\n";
+ os << reinterpret_cast<const void*>(oat_file_.End()) << "\n\n";
+ }
+
+ os << "SIZE:\n";
+ os << oat_file_.Size() << "\n\n";
os << std::flush;
for (size_t i = 0; i < oat_dex_files_.size(); i++) {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
- DumpOatDexFile(os, *oat_dex_file);
+ if (!DumpOatDexFile(os, *oat_dex_file)) {
+ success = false;
+ }
}
+ os << std::flush;
+ return success;
}
size_t ComputeSize(const void* oat_data) {
@@ -286,6 +336,10 @@
offsets_.insert(oat_file_.Size());
}
+ static uint32_t AlignCodeOffset(uint32_t maybe_thumb_offset) {
+ return maybe_thumb_offset & ~0x1; // TODO: Make this Thumb2 specific.
+ }
+
void AddOffsets(const OatFile::OatMethod& oat_method) {
uint32_t code_offset = oat_method.GetCodeOffset();
if (oat_file_.GetOatHeader().GetInstructionSet() == kThumb2) {
@@ -297,8 +351,9 @@
offsets_.insert(oat_method.GetNativeGcMapOffset());
}
- void DumpOatDexFile(std::ostream& os, const OatFile::OatDexFile& oat_dex_file) {
- os << "OAT DEX FILE:\n";
+ bool DumpOatDexFile(std::ostream& os, const OatFile::OatDexFile& oat_dex_file) {
+ bool success = true;
+ os << "OatDexFile:\n";
os << StringPrintf("location: %s\n", oat_dex_file.GetDexFileLocation().c_str());
os << StringPrintf("checksum: 0x%08x\n", oat_dex_file.GetDexFileLocationChecksum());
@@ -308,24 +363,30 @@
std::unique_ptr<const DexFile> dex_file(oat_dex_file.OpenDexFile(&error_msg));
if (dex_file.get() == nullptr) {
os << "NOT FOUND: " << error_msg << "\n\n";
- return;
+ os << std::flush;
+ return false;
}
for (size_t class_def_index = 0;
class_def_index < dex_file->NumClassDefs();
class_def_index++) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const char* descriptor = dex_file->GetClassDescriptor(class_def);
+ uint32_t oat_class_offset = oat_dex_file.GetOatClassOffset(class_def_index);
const OatFile::OatClass oat_class = oat_dex_file.GetOatClass(class_def_index);
- os << StringPrintf("%zd: %s (type_idx=%d)", class_def_index, descriptor, class_def.class_idx_)
+ os << StringPrintf("%zd: %s (offset=0x%08x) (type_idx=%d)",
+ class_def_index, descriptor, oat_class_offset, class_def.class_idx_)
<< " (" << oat_class.GetStatus() << ")"
<< " (" << oat_class.GetType() << ")\n";
// TODO: include bitmap here if type is kOatClassSomeCompiled?
Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indented_os(&indent_filter);
- DumpOatClass(indented_os, oat_class, *(dex_file.get()), class_def);
+ if (!DumpOatClass(indented_os, oat_class, *(dex_file.get()), class_def)) {
+ success = false;
+ }
}
os << std::flush;
+ return success;
}
static void SkipAllFields(ClassDataItemIterator& it) {
@@ -337,38 +398,51 @@
}
}
- void DumpOatClass(std::ostream& os, const OatFile::OatClass& oat_class, const DexFile& dex_file,
+ bool DumpOatClass(std::ostream& os, const OatFile::OatClass& oat_class, const DexFile& dex_file,
const DexFile::ClassDef& class_def) {
+ bool success = true;
const byte* class_data = dex_file.GetClassData(class_def);
if (class_data == nullptr) { // empty class such as a marker interface?
- return;
+ os << std::flush;
+ return success;
}
ClassDataItemIterator it(dex_file, class_data);
SkipAllFields(it);
- uint32_t class_method_idx = 0;
+ uint32_t class_method_index = 0;
while (it.HasNextDirectMethod()) {
- const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_idx);
- DumpOatMethod(os, class_def, class_method_idx, oat_method, dex_file,
- it.GetMemberIndex(), it.GetMethodCodeItem(), it.GetRawMemberAccessFlags());
- class_method_idx++;
+ if (!DumpOatMethod(os, class_def, class_method_index, oat_class, dex_file,
+ it.GetMemberIndex(), it.GetMethodCodeItem(),
+ it.GetRawMemberAccessFlags())) {
+ success = false;
+ }
+ class_method_index++;
it.Next();
}
while (it.HasNextVirtualMethod()) {
- const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_idx);
- DumpOatMethod(os, class_def, class_method_idx, oat_method, dex_file,
- it.GetMemberIndex(), it.GetMethodCodeItem(), it.GetRawMemberAccessFlags());
- class_method_idx++;
+ if (!DumpOatMethod(os, class_def, class_method_index, oat_class, dex_file,
+ it.GetMemberIndex(), it.GetMethodCodeItem(),
+ it.GetRawMemberAccessFlags())) {
+ success = false;
+ }
+ class_method_index++;
it.Next();
}
DCHECK(!it.HasNext());
os << std::flush;
+ return success;
}
- void DumpOatMethod(std::ostream& os, const DexFile::ClassDef& class_def,
+ static constexpr uint32_t kPrologueBytes = 16;
+
+ // When this was picked, the largest arm method was 55,256 bytes and arm64 was 50,412 bytes.
+ static constexpr uint32_t kMaxCodeSize = 100 * 1000;
+
+ bool DumpOatMethod(std::ostream& os, const DexFile::ClassDef& class_def,
uint32_t class_method_index,
- const OatFile::OatMethod& oat_method, const DexFile& dex_file,
+ const OatFile::OatClass& oat_class, const DexFile& dex_file,
uint32_t dex_method_idx, const DexFile::CodeItem* code_item,
uint32_t method_access_flags) {
+ bool success = true;
os << StringPrintf("%d: %s (dex_method_idx=%d)\n",
class_method_index, PrettyMethod(dex_method_idx, dex_file, true).c_str(),
dex_method_idx);
@@ -387,47 +461,183 @@
verifier.reset(DumpVerifier(*indent2_os, dex_method_idx, &dex_file, class_def, code_item,
method_access_flags));
}
- {
- *indent1_os << "OAT DATA:\n";
- *indent2_os << StringPrintf("frame_size_in_bytes: %zd\n", oat_method.GetFrameSizeInBytes());
- *indent2_os << StringPrintf("core_spill_mask: 0x%08x ", oat_method.GetCoreSpillMask());
- DumpSpillMask(*indent2_os, oat_method.GetCoreSpillMask(), false);
- *indent2_os << StringPrintf("\nfp_spill_mask: 0x%08x ", oat_method.GetFpSpillMask());
- DumpSpillMask(*indent2_os, oat_method.GetFpSpillMask(), true);
- *indent2_os << StringPrintf("\nvmap_table: %p (offset=0x%08x)\n",
- oat_method.GetVmapTable(), oat_method.GetVmapTableOffset());
- DumpVmap(*indent2_os, oat_method);
- *indent2_os << StringPrintf("mapping_table: %p (offset=0x%08x)\n",
- oat_method.GetMappingTable(), oat_method.GetMappingTableOffset());
- if (dump_raw_mapping_table_) {
- Indenter indent3_filter(indent2_os->rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indent3_os(&indent3_filter);
- DumpMappingTable(indent3_os, oat_method);
+ uint32_t oat_method_offsets_offset = oat_class.GetOatMethodOffsetsOffset(class_method_index);
+ const OatMethodOffsets* oat_method_offsets = oat_class.GetOatMethodOffsets(class_method_index);
+ const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_index);
+ {
+ *indent1_os << "OatMethodOffsets ";
+ if (options_->absolute_addresses_) {
+ *indent1_os << StringPrintf("%p ", oat_method_offsets);
}
- *indent2_os << StringPrintf("gc_map: %p (offset=0x%08x)\n",
- oat_method.GetNativeGcMap(), oat_method.GetNativeGcMapOffset());
- if (dump_raw_gc_map_) {
+ *indent1_os << StringPrintf("(offset=0x%08x)\n", oat_method_offsets_offset);
+ if (oat_method_offsets_offset > oat_file_.Size()) {
+ *indent1_os << StringPrintf(
+ "WARNING: oat method offsets offset 0x%08x is past end of file 0x%08zx.\n",
+ oat_method_offsets_offset, oat_file_.Size());
+ // If we can't read OatMethodOffsets, the rest of the data is dangerous to read.
+ os << std::flush;
+ return false;
+ }
+
+ uint32_t code_offset = oat_method.GetCodeOffset();
+ *indent2_os << StringPrintf("code_offset: 0x%08x ", code_offset);
+ uint32_t aligned_code_begin = AlignCodeOffset(oat_method.GetCodeOffset());
+ if (aligned_code_begin > oat_file_.Size()) {
+ *indent2_os << StringPrintf("WARNING: "
+ "code offset 0x%08x is past end of file 0x%08zx.\n",
+ aligned_code_begin, oat_file_.Size());
+ success = false;
+ }
+ *indent2_os << "\n";
+
+ *indent2_os << "gc_map: ";
+ if (options_->absolute_addresses_) {
+ *indent2_os << StringPrintf("%p ", oat_method.GetNativeGcMap());
+ }
+ uint32_t gc_map_offset = oat_method.GetNativeGcMapOffset();
+ *indent2_os << StringPrintf("(offset=0x%08x)\n", gc_map_offset);
+ if (gc_map_offset > oat_file_.Size()) {
+ *indent2_os << StringPrintf("WARNING: "
+ "gc map table offset 0x%08x is past end of file 0x%08zx.\n",
+ gc_map_offset, oat_file_.Size());
+ success = false;
+ } else if (options_->dump_raw_gc_map_) {
Indenter indent3_filter(indent2_os->rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indent3_os(&indent3_filter);
DumpGcMap(indent3_os, oat_method, code_item);
}
}
{
- const void* code = oat_method.GetQuickCode();
- uint32_t code_size = oat_method.GetQuickCodeSize();
- if (code == nullptr) {
- code = oat_method.GetPortableCode();
- code_size = oat_method.GetPortableCodeSize();
- }
- *indent1_os << StringPrintf("CODE: %p (offset=0x%08x size=%d)%s\n",
- code,
- oat_method.GetCodeOffset(),
- code_size,
- code != nullptr ? "..." : "");
+ *indent1_os << "OatQuickMethodHeader ";
+ uint32_t method_header_offset = oat_method.GetOatQuickMethodHeaderOffset();
+ const OatQuickMethodHeader* method_header = oat_method.GetOatQuickMethodHeader();
- DumpCode(*indent2_os, verifier.get(), oat_method, code_item);
+ if (options_->absolute_addresses_) {
+ *indent1_os << StringPrintf("%p ", method_header);
+ }
+ *indent1_os << StringPrintf("(offset=0x%08x)\n", method_header_offset);
+ if (method_header_offset > oat_file_.Size()) {
+ *indent1_os << StringPrintf(
+ "WARNING: oat quick method header offset 0x%08x is past end of file 0x%08zx.\n",
+ method_header_offset, oat_file_.Size());
+ // If we can't read the OatQuickMethodHeader, the rest of the data is dangerous to read.
+ os << std::flush;
+ return false;
+ }
+
+ *indent2_os << "mapping_table: ";
+ if (options_->absolute_addresses_) {
+ *indent2_os << StringPrintf("%p ", oat_method.GetMappingTable());
+ }
+ uint32_t mapping_table_offset = oat_method.GetMappingTableOffset();
+ *indent2_os << StringPrintf("(offset=0x%08x)\n", oat_method.GetMappingTableOffset());
+ if (mapping_table_offset > oat_file_.Size()) {
+ *indent2_os << StringPrintf("WARNING: "
+ "mapping table offset 0x%08x is past end of file 0x%08zx. "
+ "mapping table offset was loaded from offset 0x%08x.\n",
+ mapping_table_offset, oat_file_.Size(),
+ oat_method.GetMappingTableOffsetOffset());
+ success = false;
+ } else if (options_->dump_raw_mapping_table_) {
+ Indenter indent3_filter(indent2_os->rdbuf(), kIndentChar, kIndentBy1Count);
+ std::ostream indent3_os(&indent3_filter);
+ DumpMappingTable(indent3_os, oat_method);
+ }
+
+ *indent2_os << "vmap_table: ";
+ if (options_->absolute_addresses_) {
+ *indent2_os << StringPrintf("%p ", oat_method.GetVmapTable());
+ }
+ uint32_t vmap_table_offset = oat_method.GetVmapTableOffset();
+ *indent2_os << StringPrintf("(offset=0x%08x)\n", vmap_table_offset);
+ if (vmap_table_offset > oat_file_.Size()) {
+ *indent2_os << StringPrintf("WARNING: "
+ "vmap table offset 0x%08x is past end of file 0x%08zx. "
+ "vmap table offset was loaded from offset 0x%08x.\n",
+ vmap_table_offset, oat_file_.Size(),
+ oat_method.GetVmapTableOffsetOffset());
+ success = false;
+ } else if (options_->dump_vmap_) {
+ DumpVmap(*indent2_os, oat_method);
+ }
}
+ {
+ *indent1_os << "QuickMethodFrameInfo\n";
+
+ *indent2_os << StringPrintf("frame_size_in_bytes: %zd\n", oat_method.GetFrameSizeInBytes());
+ *indent2_os << StringPrintf("core_spill_mask: 0x%08x ", oat_method.GetCoreSpillMask());
+ DumpSpillMask(*indent2_os, oat_method.GetCoreSpillMask(), false);
+ *indent2_os << "\n";
+ *indent2_os << StringPrintf("fp_spill_mask: 0x%08x ", oat_method.GetFpSpillMask());
+ DumpSpillMask(*indent2_os, oat_method.GetFpSpillMask(), true);
+ *indent2_os << "\n";
+ }
+ {
+ *indent1_os << "CODE: ";
+ uint32_t code_size_offset = oat_method.GetQuickCodeSizeOffset();
+ if (code_size_offset > oat_file_.Size()) {
+ *indent2_os << StringPrintf("WARNING: "
+ "code size offset 0x%08x is past end of file 0x%08zx.",
+ code_size_offset, oat_file_.Size());
+ success = false;
+ } else {
+ const void* code = oat_method.GetQuickCode();
+ uint32_t code_size = oat_method.GetQuickCodeSize();
+ if (code == nullptr) {
+ code = oat_method.GetPortableCode();
+ code_size = oat_method.GetPortableCodeSize();
+ code_size_offset = 0;
+ }
+ uint32_t code_offset = oat_method.GetCodeOffset();
+ uint32_t aligned_code_begin = AlignCodeOffset(code_offset);
+ uint64_t aligned_code_end = aligned_code_begin + code_size;
+
+ if (options_->absolute_addresses_) {
+ *indent1_os << StringPrintf("%p ", code);
+ }
+ *indent1_os << StringPrintf("(code_offset=0x%08x size_offset=0x%08x size=%u)%s\n",
+ code_offset,
+ code_size_offset,
+ code_size,
+ code != nullptr ? "..." : "");
+
+ if (aligned_code_begin > oat_file_.Size()) {
+ *indent2_os << StringPrintf("WARNING: "
+ "start of code at 0x%08x is past end of file 0x%08zx.",
+ aligned_code_begin, oat_file_.Size());
+ success = false;
+ } else if (aligned_code_end > oat_file_.Size()) {
+ *indent2_os << StringPrintf("WARNING: "
+ "end of code at 0x%08" PRIx64 " is past end of file 0x%08zx. "
+ "code size is 0x%08x loaded from offset 0x%08x.\n",
+ aligned_code_end, oat_file_.Size(),
+ code_size, code_size_offset);
+ success = false;
+ if (options_->disassemble_code_) {
+ if (code_size_offset + kPrologueBytes <= oat_file_.Size()) {
+ DumpCode(*indent2_os, verifier.get(), oat_method, code_item, true, kPrologueBytes);
+ }
+ }
+ } else if (code_size > kMaxCodeSize) {
+ *indent2_os << StringPrintf("WARNING: "
+ "code size %d is bigger than max expected threshold of %d. "
+ "code size is 0x%08x loaded from offset 0x%08x.\n",
+ code_size, kMaxCodeSize,
+ code_size, code_size_offset);
+ success = false;
+ if (options_->disassemble_code_) {
+ if (code_size_offset + kPrologueBytes <= oat_file_.Size()) {
+ DumpCode(*indent2_os, verifier.get(), oat_method, code_item, true, kPrologueBytes);
+ }
+ }
+ } else if (options_->disassemble_code_) {
+ DumpCode(*indent2_os, verifier.get(), oat_method, code_item, !success, 0);
+ }
+ }
+ }
+ os << std::flush;
+ return success;
}
void DumpSpillMask(std::ostream& os, uint32_t spill_mask, bool is_float) {
@@ -701,11 +911,14 @@
}
void DumpCode(std::ostream& os, verifier::MethodVerifier* verifier,
- const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item) {
+ const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item,
+ bool bad_input, size_t code_size) {
const void* portable_code = oat_method.GetPortableCode();
const void* quick_code = oat_method.GetQuickCode();
- size_t code_size = oat_method.GetQuickCodeSize();
+ if (code_size == 0) {
+ code_size = oat_method.GetQuickCodeSize();
+ }
if ((code_size == 0) || ((portable_code == nullptr) && (quick_code == nullptr))) {
os << "NO CODE!\n";
return;
@@ -713,13 +926,17 @@
const uint8_t* quick_native_pc = reinterpret_cast<const uint8_t*>(quick_code);
size_t offset = 0;
while (offset < code_size) {
- DumpMappingAtOffset(os, oat_method, offset, false);
+ if (!bad_input) {
+ DumpMappingAtOffset(os, oat_method, offset, false);
+ }
offset += disassembler_->Dump(os, quick_native_pc + offset);
- uint32_t dex_pc = DumpMappingAtOffset(os, oat_method, offset, true);
- if (dex_pc != DexFile::kDexNoIndex) {
- DumpGcMapAtNativePcOffset(os, oat_method, code_item, offset);
- if (verifier != nullptr) {
- DumpVRegsAtDexPc(os, verifier, oat_method, code_item, dex_pc);
+ if (!bad_input) {
+ uint32_t dex_pc = DumpMappingAtOffset(os, oat_method, offset, true);
+ if (dex_pc != DexFile::kDexNoIndex) {
+ DumpGcMapAtNativePcOffset(os, oat_method, code_item, offset);
+ if (verifier != nullptr) {
+ DumpVRegsAtDexPc(os, verifier, oat_method, code_item, dex_pc);
+ }
}
}
}
@@ -730,23 +947,22 @@
}
const OatFile& oat_file_;
- std::vector<const OatFile::OatDexFile*> oat_dex_files_;
- bool dump_raw_mapping_table_;
- bool dump_raw_gc_map_;
+ const std::vector<const OatFile::OatDexFile*> oat_dex_files_;
+ const OatDumperOptions* options_;
std::set<uintptr_t> offsets_;
- std::unique_ptr<Disassembler> disassembler_;
+ Disassembler* disassembler_;
};
class ImageDumper {
public:
explicit ImageDumper(std::ostream* os, gc::space::ImageSpace& image_space,
- const ImageHeader& image_header, bool dump_raw_mapping_table,
- bool dump_raw_gc_map)
- : os_(os), image_space_(image_space), image_header_(image_header),
- dump_raw_mapping_table_(dump_raw_mapping_table),
- dump_raw_gc_map_(dump_raw_gc_map) {}
+ const ImageHeader& image_header, OatDumperOptions* oat_dumper_options)
+ : os_(os),
+ image_space_(image_space),
+ image_header_(image_header),
+ oat_dumper_options_(oat_dumper_options) {}
- void Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostream& os = *os_;
os << "MAGIC: " << image_header_.GetMagic() << "\n\n";
@@ -820,15 +1036,14 @@
oat_file = OatFile::Open(oat_location, oat_location, nullptr, false, &error_msg);
if (oat_file == nullptr) {
os << "NOT FOUND: " << error_msg << "\n";
- return;
+ return false;
}
}
os << "\n";
stats_.oat_file_bytes = oat_file->Size();
- oat_dumper_.reset(new OatDumper(*oat_file, dump_raw_mapping_table_,
- dump_raw_gc_map_));
+ oat_dumper_.reset(new OatDumper(*oat_file, oat_dumper_options_.release()));
for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
CHECK(oat_dex_file != nullptr);
@@ -896,7 +1111,7 @@
os << std::flush;
- oat_dumper_->Dump(os);
+ return oat_dumper_->Dump(os);
}
private:
@@ -1449,12 +1664,11 @@
// threshold, we assume 2 bytes per instruction and 2 instructions per block.
kLargeMethodDexBytes = 16000
};
- std::unique_ptr<OatDumper> oat_dumper_;
std::ostream* os_;
gc::space::ImageSpace& image_space_;
const ImageHeader& image_header_;
- bool dump_raw_mapping_table_;
- bool dump_raw_gc_map_;
+ std::unique_ptr<OatDumper> oat_dumper_;
+ std::unique_ptr<OatDumperOptions> oat_dumper_options_;
DISALLOW_COPY_AND_ASSIGN(ImageDumper);
};
@@ -1480,6 +1694,8 @@
std::unique_ptr<std::ofstream> out;
bool dump_raw_mapping_table = false;
bool dump_raw_gc_map = false;
+ bool dump_vmap = true;
+ bool disassemble_code = true;
for (int i = 0; i < argc; i++) {
const StringPiece option(argv[i]);
@@ -1502,15 +1718,14 @@
} else if (instruction_set_str == "x86_64") {
instruction_set = kX86_64;
}
- } else if (option.starts_with("--dump:")) {
- if (option == "--dump:raw_mapping_table") {
- dump_raw_mapping_table = true;
- } else if (option == "--dump:raw_gc_map") {
- dump_raw_gc_map = true;
- } else {
- fprintf(stderr, "Unknown argument %s\n", option.data());
- usage();
- }
+ } else if (option == "--dump:raw_mapping_table") {
+ dump_raw_mapping_table = true;
+ } else if (option == "--dump:raw_gc_map") {
+ dump_raw_gc_map = true;
+ } else if (option == "--no-dump:vmap") {
+ dump_vmap = false;
+ } else if (option == "--no-disassemble") {
+ disassemble_code = false;
} else if (option.starts_with("--output=")) {
const char* filename = option.substr(strlen("--output=")).data();
out.reset(new std::ofstream(filename));
@@ -1535,6 +1750,13 @@
return EXIT_FAILURE;
}
+ // If we are only doing the oat file, disable absolute_addresses. Keep them for image dumping.
+ bool absolute_addresses = (oat_filename == nullptr);
+ std::unique_ptr<OatDumperOptions> oat_dumper_options(new OatDumperOptions(dump_raw_mapping_table,
+ dump_raw_gc_map,
+ dump_vmap,
+ disassemble_code,
+ absolute_addresses));
if (oat_filename != nullptr) {
std::string error_msg;
OatFile* oat_file =
@@ -1543,9 +1765,9 @@
fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
return EXIT_FAILURE;
}
- OatDumper oat_dumper(*oat_file, dump_raw_mapping_table, dump_raw_gc_map);
- oat_dumper.Dump(*os);
- return EXIT_SUCCESS;
+ OatDumper oat_dumper(*oat_file, oat_dumper_options.release());
+ bool success = oat_dumper.Dump(*os);
+ return (success) ? EXIT_SUCCESS : EXIT_FAILURE;
}
RuntimeOptions options;
@@ -1589,10 +1811,9 @@
fprintf(stderr, "Invalid image header %s\n", image_location);
return EXIT_FAILURE;
}
- ImageDumper image_dumper(os, *image_space, image_header,
- dump_raw_mapping_table, dump_raw_gc_map);
- image_dumper.Dump();
- return EXIT_SUCCESS;
+ ImageDumper image_dumper(os, *image_space, image_header, oat_dumper_options.release());
+ bool success = image_dumper.Dump();
+ return (success) ? EXIT_SUCCESS : EXIT_FAILURE;
}
} // namespace art
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 55e8141..7403660 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -525,14 +525,15 @@
t.NewTiming("Fixup Elf Headers");
// Fixup Phdr's
for (unsigned int i = 0; i < oat_file_->GetProgramHeaderNum(); i++) {
- Elf32_Phdr& hdr = oat_file_->GetProgramHeader(i);
- if (hdr.p_vaddr != 0 && hdr.p_vaddr != hdr.p_offset) {
+ Elf32_Phdr* hdr = oat_file_->GetProgramHeader(i);
+ CHECK(hdr != nullptr);
+ if (hdr->p_vaddr != 0 && hdr->p_vaddr != hdr->p_offset) {
need_fixup = true;
- hdr.p_vaddr += delta_;
+ hdr->p_vaddr += delta_;
}
- if (hdr.p_paddr != 0 && hdr.p_paddr != hdr.p_offset) {
+ if (hdr->p_paddr != 0 && hdr->p_paddr != hdr->p_offset) {
need_fixup = true;
- hdr.p_paddr += delta_;
+ hdr->p_paddr += delta_;
}
}
if (!need_fixup) {
@@ -542,9 +543,10 @@
}
t.NewTiming("Fixup Section Headers");
for (unsigned int i = 0; i < oat_file_->GetSectionHeaderNum(); i++) {
- Elf32_Shdr& hdr = oat_file_->GetSectionHeader(i);
- if (hdr.sh_addr != 0) {
- hdr.sh_addr += delta_;
+ Elf32_Shdr* hdr = oat_file_->GetSectionHeader(i);
+ CHECK(hdr != nullptr);
+ if (hdr->sh_addr != 0) {
+ hdr->sh_addr += delta_;
}
}
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 0ab6626..17ee8ab 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -118,6 +118,7 @@
native/java_lang_Thread.cc \
native/java_lang_Throwable.cc \
native/java_lang_VMClassLoader.cc \
+ native/java_lang_ref_FinalizerReference.cc \
native/java_lang_ref_Reference.cc \
native/java_lang_reflect_Array.cc \
native/java_lang_reflect_Constructor.cc \
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index d149fbe..fae8ba9 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -37,18 +37,24 @@
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
+Mutex* Locks::intern_table_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
+Mutex* Locks::reference_processor_lock_ = nullptr;
+Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
-Mutex* Locks::intern_table_lock_ = nullptr;
struct AllMutexData {
// A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
@@ -871,6 +877,10 @@
DCHECK(heap_bitmap_lock_ == nullptr);
heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
+ DCHECK(trace_lock_ == nullptr);
+ trace_lock_ = new Mutex("trace lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
DCHECK(runtime_shutdown_lock_ == nullptr);
runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
@@ -879,10 +889,6 @@
DCHECK(profiler_lock_ == nullptr);
profiler_lock_ = new Mutex("profiler lock", current_lock_level);
- UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
- DCHECK(trace_lock_ == nullptr);
- trace_lock_ = new Mutex("trace lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
DCHECK(deoptimization_lock_ == nullptr);
deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
@@ -922,6 +928,30 @@
DCHECK(intern_table_lock_ == nullptr);
intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
+ DCHECK(reference_processor_lock_ == nullptr);
+ reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
+ DCHECK(reference_queue_cleared_references_lock_ == nullptr);
+ reference_queue_cleared_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
+ DCHECK(reference_queue_weak_references_lock_ == nullptr);
+ reference_queue_weak_references_lock_ = new Mutex("ReferenceQueue weak references lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
+ DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
+ reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
+ DCHECK(reference_queue_phantom_references_lock_ == nullptr);
+ reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
+ DCHECK(reference_queue_soft_references_lock_ == nullptr);
+ reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
DCHECK(abort_lock_ == nullptr);
abort_lock_ = new Mutex("abort lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 89ae1ab..013c078 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -60,11 +60,16 @@
kThreadSuspendCountLock,
kAbortLock,
kJdwpSocketLock,
+ kReferenceQueueSoftReferencesLock,
+ kReferenceQueuePhantomReferencesLock,
+ kReferenceQueueFinalizerReferencesLock,
+ kReferenceQueueWeakReferencesLock,
+ kReferenceQueueClearedReferencesLock,
+ kReferenceProcessorLock,
kRosAllocGlobalLock,
kRosAllocBracketLock,
kRosAllocBulkFreeLock,
kAllocSpaceLock,
- kReferenceProcessorLock,
kDexFileMethodInlinerLock,
kDexFileToMethodInlinerMapLock,
kMarkSweepMarkStackLock,
@@ -87,12 +92,12 @@
kBreakpointInvokeLock,
kAllocTrackerLock,
kDeoptimizationLock,
- kTraceLock,
kProfilerLock,
kJdwpEventListLock,
kJdwpAttachLock,
kJdwpStartLock,
kRuntimeShutdownLock,
+ kTraceLock,
kHeapBitmapLock,
kMutatorLock,
kThreadListSuspendThreadLock,
@@ -591,8 +596,26 @@
// Guards intern table.
static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
+ // Guards reference processor.
+ static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);
+
+ // Guards cleared references queue.
+ static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);
+
+ // Guards weak references queue.
+ static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);
+
+ // Guards finalizer references queue.
+ static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);
+
+ // Guards phantom references queue.
+ static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);
+
+ // Guards soft references queue.
+ static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
+
// Have an exclusive aborting thread.
- static Mutex* abort_lock_ ACQUIRED_AFTER(intern_table_lock_);
+ static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
// Allow mutual exclusion when manipulating Thread::suspend_count_.
// TODO: Does the trade-off of a per-thread lock make sense?
diff --git a/runtime/base/stringprintf_test.cc b/runtime/base/stringprintf_test.cc
new file mode 100644
index 0000000..0bfde33
--- /dev/null
+++ b/runtime/base/stringprintf_test.cc
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stringprintf.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(StringPrintfTest, HexSizeT) {
+ size_t size = 0x00107e59;
+ EXPECT_STREQ("00107e59", StringPrintf("%08zx", size).c_str());
+ EXPECT_STREQ("0x00107e59", StringPrintf("0x%08zx", size).c_str());
+}
+
+} // namespace art
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 2e92069..7a992b8 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4307,7 +4307,7 @@
recent_allocation_records_ = new AllocRecord[alloc_record_max_];
CHECK(recent_allocation_records_ != NULL);
}
- Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+ Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints(false);
} else {
{
ScopedObjectAccess soa(self); // For type_cache_.Clear();
@@ -4323,7 +4323,7 @@
type_cache_.Clear();
}
// If an allocation comes in before we uninstrument, we will safely drop it on the floor.
- Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
+ Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints(false);
}
}
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index f6e8921..0ab6394 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -202,12 +202,25 @@
}
}
- // Either way, the program header is relative to the elf header
- program_headers_start_ = Begin() + GetHeader().e_phoff;
+ if (program_header_only_) {
+ program_headers_start_ = Begin() + GetHeader().e_phoff;
+ } else {
+ if (!CheckAndSet(GetHeader().e_phoff, "program headers", &program_headers_start_, error_msg)) {
+ return false;
+ }
- if (!program_header_only_) {
// Setup section headers.
- section_headers_start_ = Begin() + GetHeader().e_shoff;
+ if (!CheckAndSet(GetHeader().e_shoff, "section headers", &section_headers_start_, error_msg)) {
+ return false;
+ }
+
+ // Find shstrtab.
+ Elf32_Shdr* shstrtab_section_header = GetSectionNameStringSection();
+ if (shstrtab_section_header == nullptr) {
+ *error_msg = StringPrintf("Failed to find shstrtab section header in ELF file: '%s'",
+ file_->GetPath().c_str());
+ return false;
+ }
// Find .dynamic section info from program header
dynamic_program_header_ = FindProgamHeaderByType(PT_DYNAMIC);
@@ -217,48 +230,84 @@
return false;
}
- dynamic_section_start_
- = reinterpret_cast<Elf32_Dyn*>(Begin() + GetDynamicProgramHeader().p_offset);
+ if (!CheckAndSet(GetDynamicProgramHeader().p_offset, "dynamic section",
+ reinterpret_cast<byte**>(&dynamic_section_start_), error_msg)) {
+ return false;
+ }
// Find other sections from section headers
for (Elf32_Word i = 0; i < GetSectionHeaderNum(); i++) {
- Elf32_Shdr& section_header = GetSectionHeader(i);
- byte* section_addr = Begin() + section_header.sh_offset;
- switch (section_header.sh_type) {
+ Elf32_Shdr* section_header = GetSectionHeader(i);
+ if (section_header == nullptr) {
+ *error_msg = StringPrintf("Failed to find section header for section %d in ELF file: '%s'",
+ i, file_->GetPath().c_str());
+ return false;
+ }
+ switch (section_header->sh_type) {
case SHT_SYMTAB: {
- symtab_section_start_ = reinterpret_cast<Elf32_Sym*>(section_addr);
+ if (!CheckAndSet(section_header->sh_offset, "symtab",
+ reinterpret_cast<byte**>(&symtab_section_start_), error_msg)) {
+ return false;
+ }
break;
}
case SHT_DYNSYM: {
- dynsym_section_start_ = reinterpret_cast<Elf32_Sym*>(section_addr);
+ if (!CheckAndSet(section_header->sh_offset, "dynsym",
+ reinterpret_cast<byte**>(&dynsym_section_start_), error_msg)) {
+ return false;
+ }
break;
}
case SHT_STRTAB: {
// TODO: base these off of sh_link from .symtab and .dynsym above
- if ((section_header.sh_flags & SHF_ALLOC) != 0) {
- dynstr_section_start_ = reinterpret_cast<char*>(section_addr);
+ if ((section_header->sh_flags & SHF_ALLOC) != 0) {
+ // Check that this is named ".dynstr" and ignore otherwise.
+ const char* header_name = GetString(*shstrtab_section_header, section_header->sh_name);
+ if (strncmp(".dynstr", header_name, 8) == 0) {
+ if (!CheckAndSet(section_header->sh_offset, "dynstr",
+ reinterpret_cast<byte**>(&dynstr_section_start_), error_msg)) {
+ return false;
+ }
+ }
} else {
- strtab_section_start_ = reinterpret_cast<char*>(section_addr);
+ // Check that this is named ".strtab" and ignore otherwise.
+ const char* header_name = GetString(*shstrtab_section_header, section_header->sh_name);
+ if (strncmp(".strtab", header_name, 8) == 0) {
+ if (!CheckAndSet(section_header->sh_offset, "strtab",
+ reinterpret_cast<byte**>(&strtab_section_start_), error_msg)) {
+ return false;
+ }
+ }
}
break;
}
case SHT_DYNAMIC: {
- if (reinterpret_cast<byte*>(dynamic_section_start_) != section_addr) {
+ if (reinterpret_cast<byte*>(dynamic_section_start_) !=
+ Begin() + section_header->sh_offset) {
LOG(WARNING) << "Failed to find matching SHT_DYNAMIC for PT_DYNAMIC in "
<< file_->GetPath() << ": " << std::hex
<< reinterpret_cast<void*>(dynamic_section_start_)
- << " != " << reinterpret_cast<void*>(section_addr);
+ << " != " << reinterpret_cast<void*>(Begin() + section_header->sh_offset);
return false;
}
break;
}
case SHT_HASH: {
- hash_section_start_ = reinterpret_cast<Elf32_Word*>(section_addr);
+ if (!CheckAndSet(section_header->sh_offset, "hash section",
+ reinterpret_cast<byte**>(&hash_section_start_), error_msg)) {
+ return false;
+ }
break;
}
}
}
+
+ // Check for the existence of some sections.
+ if (!CheckSectionsExist(error_msg)) {
+ return false;
+ }
}
+
return true;
}
@@ -272,6 +321,117 @@
}
}
+bool ElfFile::CheckAndSet(Elf32_Off offset, const char* label,
+ byte** target, std::string* error_msg) {
+ if (Begin() + offset >= End()) {
+ *error_msg = StringPrintf("Offset %d is out of range for %s in ELF file: '%s'", offset, label,
+ file_->GetPath().c_str());
+ return false;
+ }
+ *target = Begin() + offset;
+ return true;
+}
+
+bool ElfFile::CheckSectionsLinked(const byte* source, const byte* target) const {
+ // Only works in whole-program mode, as we need to iterate over the sections.
+ // Note that we normally can't search by type, as duplicates are allowed for most section types.
+ if (program_header_only_) {
+ return true;
+ }
+
+ Elf32_Shdr* source_section = nullptr;
+ Elf32_Word target_index = 0;
+ bool target_found = false;
+ for (Elf32_Word i = 0; i < GetSectionHeaderNum(); i++) {
+ Elf32_Shdr* section_header = GetSectionHeader(i);
+
+ if (Begin() + section_header->sh_offset == source) {
+ // Found the source.
+ source_section = section_header;
+ if (target_found) {
+ break;
+ }
+ } else if (Begin() + section_header->sh_offset == target) {
+ target_index = i;
+ target_found = true;
+ if (source_section != nullptr) {
+ break;
+ }
+ }
+ }
+
+ return target_found && source_section != nullptr && source_section->sh_link == target_index;
+}
+
+bool ElfFile::CheckSectionsExist(std::string* error_msg) const {
+ if (!program_header_only_) {
+ // If in full mode, need section headers.
+ if (section_headers_start_ == nullptr) {
+ *error_msg = StringPrintf("No section headers in ELF file: '%s'", file_->GetPath().c_str());
+ return false;
+ }
+ }
+
+ // This is redundant, but defensive.
+ if (dynamic_program_header_ == nullptr) {
+ *error_msg = StringPrintf("Failed to find PT_DYNAMIC program header in ELF file: '%s'",
+ file_->GetPath().c_str());
+ return false;
+ }
+
+ // Need a dynamic section. This is redundant, but defensive.
+ if (dynamic_section_start_ == nullptr) {
+ *error_msg = StringPrintf("Failed to find dynamic section in ELF file: '%s'",
+ file_->GetPath().c_str());
+ return false;
+ }
+
+ // Symtab validation. These is not really a hard failure, as we are currently not using the
+ // symtab internally, but it's nice to be defensive.
+ if (symtab_section_start_ != nullptr) {
+ // When there's a symtab, there should be a strtab.
+ if (strtab_section_start_ == nullptr) {
+ *error_msg = StringPrintf("No strtab for symtab in ELF file: '%s'", file_->GetPath().c_str());
+ return false;
+ }
+
+ // The symtab should link to the strtab.
+ if (!CheckSectionsLinked(reinterpret_cast<const byte*>(symtab_section_start_),
+ reinterpret_cast<const byte*>(strtab_section_start_))) {
+ *error_msg = StringPrintf("Symtab is not linked to the strtab in ELF file: '%s'",
+ file_->GetPath().c_str());
+ return false;
+ }
+ }
+
+ // We always need a dynstr & dynsym.
+ if (dynstr_section_start_ == nullptr) {
+ *error_msg = StringPrintf("No dynstr in ELF file: '%s'", file_->GetPath().c_str());
+ return false;
+ }
+ if (dynsym_section_start_ == nullptr) {
+ *error_msg = StringPrintf("No dynsym in ELF file: '%s'", file_->GetPath().c_str());
+ return false;
+ }
+
+ // Need a hash section for dynamic symbol lookup.
+ if (hash_section_start_ == nullptr) {
+ *error_msg = StringPrintf("Failed to find hash section in ELF file: '%s'",
+ file_->GetPath().c_str());
+ return false;
+ }
+
+ // And the hash section should be linking to the dynsym.
+ if (!CheckSectionsLinked(reinterpret_cast<const byte*>(hash_section_start_),
+ reinterpret_cast<const byte*>(dynsym_section_start_))) {
+ *error_msg = StringPrintf("Hash section is not linked to the dynsym in ELF file: '%s'",
+ file_->GetPath().c_str());
+ return false;
+ }
+
+ return true;
+}
+
bool ElfFile::SetMap(MemMap* map, std::string* error_msg) {
if (map == nullptr) {
// MemMap::Open should have already set an error.
@@ -407,70 +567,68 @@
Elf32_Ehdr& ElfFile::GetHeader() const {
- CHECK(header_ != nullptr);
+ CHECK(header_ != nullptr); // Header has been checked in SetMap. This is a sanity check.
return *header_;
}
byte* ElfFile::GetProgramHeadersStart() const {
- CHECK(program_headers_start_ != nullptr);
+ CHECK(program_headers_start_ != nullptr); // Header has been set in Setup. This is a sanity
+ // check.
return program_headers_start_;
}
byte* ElfFile::GetSectionHeadersStart() const {
- CHECK(section_headers_start_ != nullptr);
+ CHECK(!program_header_only_); // Only used in "full" mode.
+ CHECK(section_headers_start_ != nullptr); // Is checked in CheckSectionsExist. Sanity check.
return section_headers_start_;
}
Elf32_Phdr& ElfFile::GetDynamicProgramHeader() const {
- CHECK(dynamic_program_header_ != nullptr);
+ CHECK(dynamic_program_header_ != nullptr); // Is checked in CheckSectionsExist. Sanity check.
return *dynamic_program_header_;
}
Elf32_Dyn* ElfFile::GetDynamicSectionStart() const {
- CHECK(dynamic_section_start_ != nullptr);
+ CHECK(dynamic_section_start_ != nullptr); // Is checked in CheckSectionsExist. Sanity check.
return dynamic_section_start_;
}
+static bool IsSymbolSectionType(Elf32_Word section_type) {
+ return ((section_type == SHT_SYMTAB) || (section_type == SHT_DYNSYM));
+}
+
Elf32_Sym* ElfFile::GetSymbolSectionStart(Elf32_Word section_type) const {
CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
- Elf32_Sym* symbol_section_start;
switch (section_type) {
case SHT_SYMTAB: {
- symbol_section_start = symtab_section_start_;
+ return symtab_section_start_;
break;
}
case SHT_DYNSYM: {
- symbol_section_start = dynsym_section_start_;
+ return dynsym_section_start_;
break;
}
default: {
LOG(FATAL) << section_type;
- symbol_section_start = nullptr;
+ return nullptr;
}
}
- CHECK(symbol_section_start != nullptr);
- return symbol_section_start;
}
const char* ElfFile::GetStringSectionStart(Elf32_Word section_type) const {
CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
- const char* string_section_start;
switch (section_type) {
case SHT_SYMTAB: {
- string_section_start = strtab_section_start_;
- break;
+ return strtab_section_start_;
}
case SHT_DYNSYM: {
- string_section_start = dynstr_section_start_;
- break;
+ return dynstr_section_start_;
}
default: {
LOG(FATAL) << section_type;
- string_section_start = nullptr;
+ return nullptr;
}
}
- CHECK(string_section_start != nullptr);
- return string_section_start;
}
const char* ElfFile::GetString(Elf32_Word section_type, Elf32_Word i) const {
@@ -479,12 +637,16 @@
return nullptr;
}
const char* string_section_start = GetStringSectionStart(section_type);
- const char* string = string_section_start + i;
- return string;
+ if (string_section_start == nullptr) {
+ return nullptr;
+ }
+ return string_section_start + i;
}
+// WARNING: The following methods do not check for an error condition (non-existent hash section).
+// It is the caller's job to do this.
+
Elf32_Word* ElfFile::GetHashSectionStart() const {
- CHECK(hash_section_start_ != nullptr);
return hash_section_start_;
}
@@ -496,14 +658,22 @@
return GetHashSectionStart()[1];
}
-Elf32_Word ElfFile::GetHashBucket(size_t i) const {
- CHECK_LT(i, GetHashBucketNum());
+Elf32_Word ElfFile::GetHashBucket(size_t i, bool* ok) const {
+ if (i >= GetHashBucketNum()) {
+ *ok = false;
+ return 0;
+ }
+ *ok = true;
// 0 is nbucket, 1 is nchain
return GetHashSectionStart()[2 + i];
}
-Elf32_Word ElfFile::GetHashChain(size_t i) const {
- CHECK_LT(i, GetHashChainNum());
+Elf32_Word ElfFile::GetHashChain(size_t i, bool* ok) const {
+  if (i >= GetHashChainNum()) {
+ *ok = false;
+ return 0;
+ }
+ *ok = true;
// 0 is nbucket, 1 is nchain, & chains are after buckets
return GetHashSectionStart()[2 + GetHashBucketNum() + i];
}
@@ -512,18 +682,20 @@
return GetHeader().e_phnum;
}
-Elf32_Phdr& ElfFile::GetProgramHeader(Elf32_Word i) const {
- CHECK_LT(i, GetProgramHeaderNum()) << file_->GetPath();
+Elf32_Phdr* ElfFile::GetProgramHeader(Elf32_Word i) const {
+ CHECK_LT(i, GetProgramHeaderNum()) << file_->GetPath(); // Sanity check for caller.
byte* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize);
- CHECK_LT(program_header, End()) << file_->GetPath();
- return *reinterpret_cast<Elf32_Phdr*>(program_header);
+ if (program_header >= End()) {
+ return nullptr; // Failure condition.
+ }
+ return reinterpret_cast<Elf32_Phdr*>(program_header);
}
Elf32_Phdr* ElfFile::FindProgamHeaderByType(Elf32_Word type) const {
for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
- Elf32_Phdr& program_header = GetProgramHeader(i);
- if (program_header.p_type == type) {
- return &program_header;
+ Elf32_Phdr* program_header = GetProgramHeader(i);
+ if (program_header->p_type == type) {
+ return program_header;
}
}
return nullptr;
@@ -533,14 +705,18 @@
return GetHeader().e_shnum;
}
-Elf32_Shdr& ElfFile::GetSectionHeader(Elf32_Word i) const {
+Elf32_Shdr* ElfFile::GetSectionHeader(Elf32_Word i) const {
// Can only access arbitrary sections when we have the whole file, not just program header.
// Even if we Load(), it doesn't bring in all the sections.
CHECK(!program_header_only_) << file_->GetPath();
- CHECK_LT(i, GetSectionHeaderNum()) << file_->GetPath();
+ if (i >= GetSectionHeaderNum()) {
+ return nullptr; // Failure condition.
+ }
byte* section_header = GetSectionHeadersStart() + (i * GetHeader().e_shentsize);
- CHECK_LT(section_header, End()) << file_->GetPath();
- return *reinterpret_cast<Elf32_Shdr*>(section_header);
+ if (section_header >= End()) {
+ return nullptr; // Failure condition.
+ }
+ return reinterpret_cast<Elf32_Shdr*>(section_header);
}
Elf32_Shdr* ElfFile::FindSectionByType(Elf32_Word type) const {
@@ -548,9 +724,9 @@
// We could change this to switch on known types if they were detected during loading.
CHECK(!program_header_only_) << file_->GetPath();
for (Elf32_Word i = 0; i < GetSectionHeaderNum(); i++) {
- Elf32_Shdr& section_header = GetSectionHeader(i);
- if (section_header.sh_type == type) {
-      return &section_header;
+ Elf32_Shdr* section_header = GetSectionHeader(i);
+ if (section_header->sh_type == type) {
+ return section_header;
}
}
return nullptr;
@@ -570,11 +746,15 @@
return h;
}
-Elf32_Shdr& ElfFile::GetSectionNameStringSection() const {
+Elf32_Shdr* ElfFile::GetSectionNameStringSection() const {
return GetSectionHeader(GetHeader().e_shstrndx);
}
const byte* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) const {
+ // Check that we have a hash section.
+ if (GetHashSectionStart() == nullptr) {
+ return nullptr; // Failure condition.
+ }
const Elf32_Sym* sym = FindDynamicSymbol(symbol_name);
if (sym != nullptr) {
return base_address_ + sym->st_value;
@@ -583,6 +763,7 @@
}
}
+// WARNING: Only called from FindDynamicSymbolAddress. Elides check for hash section.
const Elf32_Sym* ElfFile::FindDynamicSymbol(const std::string& symbol_name) const {
if (GetHashBucketNum() == 0) {
// No dynamic symbols at all.
@@ -590,22 +771,28 @@
}
Elf32_Word hash = elfhash(symbol_name.c_str());
Elf32_Word bucket_index = hash % GetHashBucketNum();
- Elf32_Word symbol_and_chain_index = GetHashBucket(bucket_index);
+ bool ok;
+ Elf32_Word symbol_and_chain_index = GetHashBucket(bucket_index, &ok);
+ if (!ok) {
+ return nullptr;
+ }
while (symbol_and_chain_index != 0 /* STN_UNDEF */) {
- Elf32_Sym& symbol = GetSymbol(SHT_DYNSYM, symbol_and_chain_index);
- const char* name = GetString(SHT_DYNSYM, symbol.st_name);
- if (symbol_name == name) {
- return &symbol;
+ Elf32_Sym* symbol = GetSymbol(SHT_DYNSYM, symbol_and_chain_index);
+ if (symbol == nullptr) {
+ return nullptr; // Failure condition.
}
- symbol_and_chain_index = GetHashChain(symbol_and_chain_index);
+ const char* name = GetString(SHT_DYNSYM, symbol->st_name);
+ if (symbol_name == name) {
+ return symbol;
+ }
+ symbol_and_chain_index = GetHashChain(symbol_and_chain_index, &ok);
+ if (!ok) {
+ return nullptr;
+ }
}
return nullptr;
}
-bool ElfFile::IsSymbolSectionType(Elf32_Word section_type) {
- return ((section_type == SHT_SYMTAB) || (section_type == SHT_DYNSYM));
-}
-
Elf32_Word ElfFile::GetSymbolNum(Elf32_Shdr& section_header) const {
CHECK(IsSymbolSectionType(section_header.sh_type))
<< file_->GetPath() << " " << section_header.sh_type;
@@ -613,9 +800,13 @@
return section_header.sh_size / section_header.sh_entsize;
}
-Elf32_Sym& ElfFile::GetSymbol(Elf32_Word section_type,
+Elf32_Sym* ElfFile::GetSymbol(Elf32_Word section_type,
Elf32_Word i) const {
- return *(GetSymbolSectionStart(section_type) + i);
+ Elf32_Sym* sym_start = GetSymbolSectionStart(section_type);
+ if (sym_start == nullptr) {
+ return nullptr;
+ }
+ return sym_start + i;
}
ElfFile::SymbolTable** ElfFile::GetSymbolTable(Elf32_Word section_type) {
@@ -646,27 +837,37 @@
DCHECK(build_map);
*symbol_table = new SymbolTable;
Elf32_Shdr* symbol_section = FindSectionByType(section_type);
- CHECK(symbol_section != nullptr) << file_->GetPath();
- Elf32_Shdr& string_section = GetSectionHeader(symbol_section->sh_link);
+ if (symbol_section == nullptr) {
+ return nullptr; // Failure condition.
+ }
+ Elf32_Shdr* string_section = GetSectionHeader(symbol_section->sh_link);
+ if (string_section == nullptr) {
+ return nullptr; // Failure condition.
+ }
for (uint32_t i = 0; i < GetSymbolNum(*symbol_section); i++) {
- Elf32_Sym& symbol = GetSymbol(section_type, i);
- unsigned char type = ELF32_ST_TYPE(symbol.st_info);
+ Elf32_Sym* symbol = GetSymbol(section_type, i);
+ if (symbol == nullptr) {
+ return nullptr; // Failure condition.
+ }
+ unsigned char type = ELF32_ST_TYPE(symbol->st_info);
if (type == STT_NOTYPE) {
continue;
}
- const char* name = GetString(string_section, symbol.st_name);
+ const char* name = GetString(*string_section, symbol->st_name);
if (name == nullptr) {
continue;
}
std::pair<SymbolTable::iterator, bool> result =
- (*symbol_table)->insert(std::make_pair(name, &symbol));
+ (*symbol_table)->insert(std::make_pair(name, symbol));
if (!result.second) {
// If a duplicate, make sure it has the same logical value. Seen on x86.
- CHECK_EQ(symbol.st_value, result.first->second->st_value);
- CHECK_EQ(symbol.st_size, result.first->second->st_size);
- CHECK_EQ(symbol.st_info, result.first->second->st_info);
- CHECK_EQ(symbol.st_other, result.first->second->st_other);
- CHECK_EQ(symbol.st_shndx, result.first->second->st_shndx);
+ if ((symbol->st_value != result.first->second->st_value) ||
+ (symbol->st_size != result.first->second->st_size) ||
+ (symbol->st_info != result.first->second->st_info) ||
+ (symbol->st_other != result.first->second->st_other) ||
+ (symbol->st_shndx != result.first->second->st_shndx)) {
+ return nullptr; // Failure condition.
+ }
}
}
}
@@ -680,16 +881,24 @@
// Fall back to linear search
Elf32_Shdr* symbol_section = FindSectionByType(section_type);
- CHECK(symbol_section != nullptr) << file_->GetPath();
- Elf32_Shdr& string_section = GetSectionHeader(symbol_section->sh_link);
+ if (symbol_section == nullptr) {
+ return nullptr;
+ }
+ Elf32_Shdr* string_section = GetSectionHeader(symbol_section->sh_link);
+ if (string_section == nullptr) {
+ return nullptr;
+ }
for (uint32_t i = 0; i < GetSymbolNum(*symbol_section); i++) {
- Elf32_Sym& symbol = GetSymbol(section_type, i);
- const char* name = GetString(string_section, symbol.st_name);
+ Elf32_Sym* symbol = GetSymbol(section_type, i);
+ if (symbol == nullptr) {
+ return nullptr; // Failure condition.
+ }
+ const char* name = GetString(*string_section, symbol->st_name);
if (name == nullptr) {
continue;
}
if (symbol_name == name) {
- return &symbol;
+ return symbol;
}
}
return nullptr;
@@ -708,14 +917,20 @@
const char* ElfFile::GetString(Elf32_Shdr& string_section, Elf32_Word i) const {
CHECK(!program_header_only_) << file_->GetPath();
// TODO: remove this static_cast from enum when using -std=gnu++0x
- CHECK_EQ(static_cast<Elf32_Word>(SHT_STRTAB), string_section.sh_type) << file_->GetPath();
- CHECK_LT(i, string_section.sh_size) << file_->GetPath();
+ if (static_cast<Elf32_Word>(SHT_STRTAB) != string_section.sh_type) {
+ return nullptr; // Failure condition.
+ }
+ if (i >= string_section.sh_size) {
+ return nullptr;
+ }
if (i == 0) {
return nullptr;
}
byte* strings = Begin() + string_section.sh_offset;
byte* string = strings + i;
- CHECK_LT(string, End()) << file_->GetPath();
+ if (string >= End()) {
+ return nullptr;
+ }
return reinterpret_cast<const char*>(string);
}
@@ -785,15 +1000,15 @@
Elf32_Addr min_vaddr = 0xFFFFFFFFu;
Elf32_Addr max_vaddr = 0x00000000u;
for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
- Elf32_Phdr& program_header = GetProgramHeader(i);
- if (program_header.p_type != PT_LOAD) {
+ Elf32_Phdr* program_header = GetProgramHeader(i);
+ if (program_header->p_type != PT_LOAD) {
continue;
}
- Elf32_Addr begin_vaddr = program_header.p_vaddr;
+ Elf32_Addr begin_vaddr = program_header->p_vaddr;
if (begin_vaddr < min_vaddr) {
min_vaddr = begin_vaddr;
}
- Elf32_Addr end_vaddr = program_header.p_vaddr + program_header.p_memsz;
+ Elf32_Addr end_vaddr = program_header->p_vaddr + program_header->p_memsz;
if (end_vaddr > max_vaddr) {
max_vaddr = end_vaddr;
}
@@ -843,16 +1058,21 @@
bool reserved = false;
for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
- Elf32_Phdr& program_header = GetProgramHeader(i);
+ Elf32_Phdr* program_header = GetProgramHeader(i);
+ if (program_header == nullptr) {
+ *error_msg = StringPrintf("No program header for entry %d in ELF file %s.",
+ i, file_->GetPath().c_str());
+ return false;
+ }
// Record .dynamic header information for later use
- if (program_header.p_type == PT_DYNAMIC) {
- dynamic_program_header_ = &program_header;
+ if (program_header->p_type == PT_DYNAMIC) {
+ dynamic_program_header_ = program_header;
continue;
}
// Not something to load, move on.
- if (program_header.p_type != PT_LOAD) {
+ if (program_header->p_type != PT_LOAD) {
continue;
}
@@ -874,8 +1094,8 @@
}
size_t file_length = static_cast<size_t>(temp_file_length);
if (!reserved) {
- byte* reserve_base = ((program_header.p_vaddr != 0) ?
- reinterpret_cast<byte*>(program_header.p_vaddr) : nullptr);
+ byte* reserve_base = ((program_header->p_vaddr != 0) ?
+ reinterpret_cast<byte*>(program_header->p_vaddr) : nullptr);
std::string reservation_name("ElfFile reservation for ");
reservation_name += file_->GetPath();
std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
@@ -894,18 +1114,18 @@
segments_.push_back(reserve.release());
}
// empty segment, nothing to map
- if (program_header.p_memsz == 0) {
+ if (program_header->p_memsz == 0) {
continue;
}
- byte* p_vaddr = base_address_ + program_header.p_vaddr;
+ byte* p_vaddr = base_address_ + program_header->p_vaddr;
int prot = 0;
- if (executable && ((program_header.p_flags & PF_X) != 0)) {
+ if (executable && ((program_header->p_flags & PF_X) != 0)) {
prot |= PROT_EXEC;
}
- if ((program_header.p_flags & PF_W) != 0) {
+ if ((program_header->p_flags & PF_W) != 0) {
prot |= PROT_WRITE;
}
- if ((program_header.p_flags & PF_R) != 0) {
+ if ((program_header->p_flags & PF_R) != 0) {
prot |= PROT_READ;
}
int flags = 0;
@@ -915,17 +1135,17 @@
} else {
flags |= MAP_PRIVATE;
}
- if (file_length < (program_header.p_offset + program_header.p_memsz)) {
+ if (file_length < (program_header->p_offset + program_header->p_memsz)) {
*error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF segment "
"%d of %d bytes: '%s'", file_length, i,
- program_header.p_offset + program_header.p_memsz,
+ program_header->p_offset + program_header->p_memsz,
file_->GetPath().c_str());
return false;
}
std::unique_ptr<MemMap> segment(MemMap::MapFileAtAddress(p_vaddr,
- program_header.p_memsz,
+ program_header->p_memsz,
prot, flags, file_->Fd(),
- program_header.p_offset,
+ program_header->p_offset,
true, // implies MAP_FIXED
file_->GetPath().c_str(),
error_msg));
@@ -944,8 +1164,14 @@
}
// Now that we are done loading, .dynamic should be in memory to find .dynstr, .dynsym, .hash
- dynamic_section_start_
- = reinterpret_cast<Elf32_Dyn*>(base_address_ + GetDynamicProgramHeader().p_vaddr);
+ byte* dsptr = base_address_ + GetDynamicProgramHeader().p_vaddr;
+ if ((dsptr < Begin() || dsptr >= End()) && !ValidPointer(dsptr)) {
+ *error_msg = StringPrintf("dynamic section address invalid in ELF file %s",
+ file_->GetPath().c_str());
+ return false;
+ }
+ dynamic_section_start_ = reinterpret_cast<Elf32_Dyn*>(dsptr);
+
for (Elf32_Word i = 0; i < GetDynamicNum(); i++) {
Elf32_Dyn& elf_dyn = GetDynamic(i);
byte* d_ptr = base_address_ + elf_dyn.d_un.d_ptr;
@@ -989,6 +1215,11 @@
}
}
+ // Check for the existence of some sections.
+ if (!CheckSectionsExist(error_msg)) {
+ return false;
+ }
+
// Use GDB JIT support to do stack backtrace, etc.
if (executable) {
GdbJITSupport();
@@ -1010,15 +1241,21 @@
Elf32_Shdr* ElfFile::FindSectionByName(const std::string& name) const {
CHECK(!program_header_only_);
- Elf32_Shdr& shstrtab_sec = GetSectionNameStringSection();
+ Elf32_Shdr* shstrtab_sec = GetSectionNameStringSection();
+ if (shstrtab_sec == nullptr) {
+ return nullptr;
+ }
for (uint32_t i = 0; i < GetSectionHeaderNum(); i++) {
- Elf32_Shdr& shdr = GetSectionHeader(i);
- const char* sec_name = GetString(shstrtab_sec, shdr.sh_name);
+ Elf32_Shdr* shdr = GetSectionHeader(i);
+ if (shdr == nullptr) {
+ return nullptr;
+ }
+ const char* sec_name = GetString(*shstrtab_sec, shdr->sh_name);
if (sec_name == nullptr) {
continue;
}
if (name == sec_name) {
- return &shdr;
+ return shdr;
}
}
return nullptr;
@@ -1176,7 +1413,7 @@
}
private:
- explicit DebugTag(uint32_t index) : index_(index) {}
+ explicit DebugTag(uint32_t index) : index_(index), size_(0), tag_(0), has_child_(false) {}
void AddAttribute(uint32_t type, uint32_t attr_size) {
off_map_.insert(std::pair<uint32_t, uint32_t>(type, size_));
size_map_.insert(std::pair<uint32_t, uint32_t>(type, attr_size));
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index a966bd9..985be76 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -67,35 +67,20 @@
Elf32_Ehdr& GetHeader() const;
Elf32_Word GetProgramHeaderNum() const;
- Elf32_Phdr& GetProgramHeader(Elf32_Word) const;
- Elf32_Phdr* FindProgamHeaderByType(Elf32_Word type) const;
+ Elf32_Phdr* GetProgramHeader(Elf32_Word) const;
Elf32_Word GetSectionHeaderNum() const;
- Elf32_Shdr& GetSectionHeader(Elf32_Word) const;
+ Elf32_Shdr* GetSectionHeader(Elf32_Word) const;
Elf32_Shdr* FindSectionByType(Elf32_Word type) const;
Elf32_Shdr* FindSectionByName(const std::string& name) const;
- Elf32_Shdr& GetSectionNameStringSection() const;
+ Elf32_Shdr* GetSectionNameStringSection() const;
// Find .dynsym using .hash for more efficient lookup than FindSymbolAddress.
const byte* FindDynamicSymbolAddress(const std::string& symbol_name) const;
- const Elf32_Sym* FindDynamicSymbol(const std::string& symbol_name) const;
- static bool IsSymbolSectionType(Elf32_Word section_type);
Elf32_Word GetSymbolNum(Elf32_Shdr&) const;
- Elf32_Sym& GetSymbol(Elf32_Word section_type, Elf32_Word i) const;
-
- // Find symbol in specified table, returning nullptr if it is not found.
- //
- // If build_map is true, builds a map to speed repeated access. The
- // map does not included untyped symbol values (aka STT_NOTYPE)
- // since they can contain duplicates. If build_map is false, the map
- // will be used if it was already created. Typically build_map
- // should be set unless only a small number of symbols will be
- // looked up.
- Elf32_Sym* FindSymbolByName(Elf32_Word section_type,
- const std::string& symbol_name,
- bool build_map);
+ Elf32_Sym* GetSymbol(Elf32_Word section_type, Elf32_Word i) const;
// Find address of symbol in specified table, returning 0 if it is
// not found. See FindSymbolByName for an explanation of build_map.
@@ -107,13 +92,8 @@
// special 0 offset.
const char* GetString(Elf32_Shdr&, Elf32_Word) const;
- // Lookup a string by section type. Returns nullptr for special 0 offset.
- const char* GetString(Elf32_Word section_type, Elf32_Word) const;
-
Elf32_Word GetDynamicNum() const;
Elf32_Dyn& GetDynamic(Elf32_Word) const;
- Elf32_Dyn* FindDynamicByType(Elf32_Sword type) const;
- Elf32_Word FindDynamicValueByType(Elf32_Sword type) const;
Elf32_Word GetRelNum(Elf32_Shdr&) const;
Elf32_Rel& GetRel(Elf32_Shdr&, Elf32_Word) const;
@@ -146,14 +126,45 @@
Elf32_Word* GetHashSectionStart() const;
Elf32_Word GetHashBucketNum() const;
Elf32_Word GetHashChainNum() const;
- Elf32_Word GetHashBucket(size_t i) const;
- Elf32_Word GetHashChain(size_t i) const;
+ Elf32_Word GetHashBucket(size_t i, bool* ok) const;
+ Elf32_Word GetHashChain(size_t i, bool* ok) const;
typedef std::map<std::string, Elf32_Sym*> SymbolTable;
SymbolTable** GetSymbolTable(Elf32_Word section_type);
bool ValidPointer(const byte* start) const;
+ const Elf32_Sym* FindDynamicSymbol(const std::string& symbol_name) const;
+
+ // Check that certain sections and their dependencies exist.
+ bool CheckSectionsExist(std::string* error_msg) const;
+
+ // Check that the link of the first section links to the second section.
+ bool CheckSectionsLinked(const byte* source, const byte* target) const;
+
+  // Check whether the offset is in range, and set *target to Begin() + offset if OK.
+ bool CheckAndSet(Elf32_Off offset, const char* label, byte** target, std::string* error_msg);
+
+ // Find symbol in specified table, returning nullptr if it is not found.
+ //
+ // If build_map is true, builds a map to speed repeated access. The
+  // map does not include untyped symbol values (aka STT_NOTYPE)
+ // since they can contain duplicates. If build_map is false, the map
+ // will be used if it was already created. Typically build_map
+ // should be set unless only a small number of symbols will be
+ // looked up.
+ Elf32_Sym* FindSymbolByName(Elf32_Word section_type,
+ const std::string& symbol_name,
+ bool build_map);
+
+ Elf32_Phdr* FindProgamHeaderByType(Elf32_Word type) const;
+
+ Elf32_Dyn* FindDynamicByType(Elf32_Sword type) const;
+ Elf32_Word FindDynamicValueByType(Elf32_Sword type) const;
+
+ // Lookup a string by section type. Returns nullptr for special 0 offset.
+ const char* GetString(Elf32_Word section_type, Elf32_Word) const;
+
const File* const file_;
const bool writable_;
const bool program_header_only_;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 203aaff..dfe9e29 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -423,7 +423,7 @@
}
}
if (running_on_valgrind_) {
- Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+ Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints(false);
}
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() exiting";
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 637e3c9..bfaa2bb 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -30,8 +30,13 @@
ReferenceProcessor::ReferenceProcessor()
: process_references_args_(nullptr, nullptr, nullptr),
- preserving_references_(false), lock_("reference processor lock", kReferenceProcessorLock),
- condition_("reference processor condition", lock_) {
+ preserving_references_(false),
+ condition_("reference processor condition", *Locks::reference_processor_lock_) ,
+ soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
+ weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
+ finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
+ phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
+ cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}
void ReferenceProcessor::EnableSlowPath() {
@@ -50,7 +55,7 @@
if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
return referent;
}
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::reference_processor_lock_);
while (SlowPathEnabled()) {
mirror::HeapReference<mirror::Object>* const referent_addr =
reference->GetReferentReferenceAddr();
@@ -93,12 +98,12 @@
}
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::reference_processor_lock_);
preserving_references_ = true;
}
void ReferenceProcessor::StopPreservingReferences(Thread* self) {
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::reference_processor_lock_);
preserving_references_ = false;
// We are done preserving references, some people who are blocked may see a marked referent.
condition_.Broadcast(self);
@@ -114,7 +119,7 @@
TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
Thread* self = Thread::Current();
{
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::reference_processor_lock_);
process_references_args_.is_marked_callback_ = is_marked_callback;
process_references_args_.mark_callback_ = mark_object_callback;
process_references_args_.arg_ = arg;
@@ -162,7 +167,7 @@
DCHECK(finalizer_reference_queue_.IsEmpty());
DCHECK(phantom_reference_queue_.IsEmpty());
{
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *Locks::reference_processor_lock_);
// Need to always do this since the next GC may be concurrent. Doing this for only concurrent
// could result in a stale is_marked_callback_ being called before the reference processing
// starts since there is a small window of time where slow_path_enabled_ is enabled but the
@@ -224,5 +229,31 @@
}
}
+bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, *Locks::reference_processor_lock_);
+  // Wait until we are done processing references.
+ while (SlowPathEnabled()) {
+ condition_.Wait(self);
+ }
+ // At this point, since the sentinel of the reference is live, it is guaranteed to not be
+ // enqueued if we just finished processing references. Otherwise, we may be doing the main GC
+ // phase. Since we are holding the reference processor lock, it guarantees that reference
+  // processing can't begin. The GC could have just enqueued the reference on one of the internal
+  // GC queues, but since we hold the finalizer_reference_queue_ lock it also prevents this
+ // race.
+ MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
+ if (!reference->IsEnqueued()) {
+ CHECK(reference->IsFinalizerReferenceInstance());
+ if (Runtime::Current()->IsActiveTransaction()) {
+ reference->SetPendingNext<true>(reference);
+ } else {
+ reference->SetPendingNext<false>(reference);
+ }
+ return true;
+ }
+ return false;
+}
+
} // namespace gc
} // namespace art
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 7274457..5eb095b 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -28,6 +28,7 @@
class TimingLogger;
namespace mirror {
+class FinalizerReference;
class Object;
class Reference;
} // namespace mirror
@@ -48,20 +49,25 @@
ProcessMarkStackCallback* process_mark_stack_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- LOCKS_EXCLUDED(lock_);
+ LOCKS_EXCLUDED(Locks::reference_processor_lock_);
// The slow path bool is contained in the reference class object, can only be set once
// Only allow setting this with mutators suspended so that we can avoid using a lock in the
// GetReferent fast path as an optimization.
void EnableSlowPath() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// Decode the referent, may block if references are being processed.
mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
void EnqueueClearedReferences(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_);
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void UpdateRoots(IsMarkedCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ // Make a circular list with reference if it is not enqueued. Uses the finalizer queue lock.
+ bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::reference_processor_lock_,
+ Locks::reference_queue_finalizer_references_lock_);
private:
class ProcessReferencesArgs {
@@ -78,23 +84,21 @@
};
bool SlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Called by ProcessReferences.
- void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(lock_)
+ void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::reference_processor_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// If we are preserving references it means that some dead objects may become live, we use start
// and stop preserving to block mutators using GetReferrent from getting access to these
// referents.
- void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(lock_);
- void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(lock_);
+ void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
+ void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
// Process args, used by the GetReferent to return referents which are already marked.
- ProcessReferencesArgs process_references_args_ GUARDED_BY(lock_);
+ ProcessReferencesArgs process_references_args_ GUARDED_BY(Locks::reference_processor_lock_);
// Boolean for whether or not we are preserving references (either soft references or finalizers).
// If this is true, then we cannot return a referent (see comment in GetReferent).
- bool preserving_references_ GUARDED_BY(lock_);
- // Lock that guards the reference processing.
- Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ bool preserving_references_ GUARDED_BY(Locks::reference_processor_lock_);
// Condition that people wait on if they attempt to get the referent of a reference while
// processing is in progress.
- ConditionVariable condition_ GUARDED_BY(lock_);
+ ConditionVariable condition_ GUARDED_BY(Locks::reference_processor_lock_);
// Reference queues used by the GC.
ReferenceQueue soft_reference_queue_;
ReferenceQueue weak_reference_queue_;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index c3931e8..4003524 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -25,13 +25,12 @@
namespace art {
namespace gc {
-ReferenceQueue::ReferenceQueue()
- : lock_("reference queue lock"), list_(nullptr) {
+ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}
void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
DCHECK(ref != NULL);
- MutexLock mu(self, lock_);
+ MutexLock mu(self, *lock_);
if (!ref->IsEnqueued()) {
EnqueuePendingReference(ref);
}
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index cd814bb..dbf4abc 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -44,7 +44,7 @@
// java.lang.ref.Reference objects.
class ReferenceQueue {
public:
- explicit ReferenceQueue();
+ explicit ReferenceQueue(Mutex* lock);
// Enqueue a reference if is not already enqueued. Thread safe to call from multiple threads
// since it uses a lock to avoid a race between checking for the references presence and adding
// it.
@@ -90,7 +90,7 @@
private:
// Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
// calling AtomicEnqueueIfNotEnqueued.
- Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ Mutex* lock_;
// The actual reference list. Only a root for the mark compact GC since it will be null for other
// GC types.
mirror::Reference* list_;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 0f45b9e..a2e88a6 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -597,10 +597,13 @@
thread->ResetQuickAllocEntryPointsForThread();
}
-void Instrumentation::SetEntrypointsInstrumented(bool instrumented) {
+void Instrumentation::SetEntrypointsInstrumented(bool instrumented, bool suspended) {
Runtime* runtime = Runtime::Current();
ThreadList* tl = runtime->GetThreadList();
- if (runtime->IsStarted()) {
+ if (suspended) {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ if (runtime->IsStarted() && !suspended) {
tl->SuspendAll();
}
{
@@ -608,30 +611,30 @@
SetQuickAllocEntryPointsInstrumented(instrumented);
ResetQuickAllocEntryPoints();
}
- if (runtime->IsStarted()) {
+ if (runtime->IsStarted() && !suspended) {
tl->ResumeAll();
}
}
-void Instrumentation::InstrumentQuickAllocEntryPoints() {
+void Instrumentation::InstrumentQuickAllocEntryPoints(bool suspended) {
// TODO: the read of quick_alloc_entry_points_instrumentation_counter_ is racey and this code
// should be guarded by a lock.
DCHECK_GE(quick_alloc_entry_points_instrumentation_counter_.LoadSequentiallyConsistent(), 0);
const bool enable_instrumentation =
quick_alloc_entry_points_instrumentation_counter_.FetchAndAddSequentiallyConsistent(1) == 0;
if (enable_instrumentation) {
- SetEntrypointsInstrumented(true);
+ SetEntrypointsInstrumented(true, suspended);
}
}
-void Instrumentation::UninstrumentQuickAllocEntryPoints() {
+void Instrumentation::UninstrumentQuickAllocEntryPoints(bool suspended) {
// TODO: the read of quick_alloc_entry_points_instrumentation_counter_ is racey and this code
// should be guarded by a lock.
DCHECK_GT(quick_alloc_entry_points_instrumentation_counter_.LoadSequentiallyConsistent(), 0);
const bool disable_instrumentation =
quick_alloc_entry_points_instrumentation_counter_.FetchAndSubSequentiallyConsistent(1) == 1;
if (disable_instrumentation) {
- SetEntrypointsInstrumented(false);
+ SetEntrypointsInstrumented(false, suspended);
}
}
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index d05cee5..3c1c756 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -182,10 +182,10 @@
return interpreter_handler_table_;
}
- void InstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::runtime_shutdown_lock_);
- void UninstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::runtime_shutdown_lock_);
+ void InstrumentQuickAllocEntryPoints(bool suspended)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_);
+ void UninstrumentQuickAllocEntryPoints(bool suspended)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_);
void ResetQuickAllocEntryPoints() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
// Update the code of a method respecting any installed stubs.
@@ -350,7 +350,7 @@
// No thread safety analysis to get around SetQuickAllocEntryPointsInstrumented requiring
// exclusive access to mutator lock which you can't get if the runtime isn't started.
- void SetEntrypointsInstrumented(bool instrumented) NO_THREAD_SAFETY_ANALYSIS;
+ void SetEntrypointsInstrumented(bool instrumented, bool suspended) NO_THREAD_SAFETY_ANALYSIS;
void MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc) const
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index ceff206..d8a537f 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -60,11 +60,11 @@
}
static void VMDebug_startAllocCounting(JNIEnv*, jclass) {
- Runtime::Current()->SetStatsEnabled(true);
+ Runtime::Current()->SetStatsEnabled(true, false);
}
static void VMDebug_stopAllocCounting(JNIEnv*, jclass) {
- Runtime::Current()->SetStatsEnabled(false);
+ Runtime::Current()->SetStatsEnabled(false, false);
}
static jint VMDebug_getAllocCount(JNIEnv*, jclass, jint kind) {
diff --git a/runtime/native/java_lang_ref_FinalizerReference.cc b/runtime/native/java_lang_ref_FinalizerReference.cc
new file mode 100644
index 0000000..ad48ec0
--- /dev/null
+++ b/runtime/native/java_lang_ref_FinalizerReference.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc/heap.h"
+#include "gc/reference_processor.h"
+#include "jni_internal.h"
+#include "mirror/object-inl.h"
+#include "mirror/reference-inl.h"
+#include "scoped_fast_native_object_access.h"
+
+namespace art {
+
+static jboolean FinalizerReference_makeCircularListIfUnenqueued(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::FinalizerReference* const ref = soa.Decode<mirror::FinalizerReference*>(javaThis);
+ return Runtime::Current()->GetHeap()->GetReferenceProcessor()->MakeCircularListIfUnenqueued(ref);
+}
+
+static JNINativeMethod gMethods[] = {
+ NATIVE_METHOD(FinalizerReference, makeCircularListIfUnenqueued, "!()Z"),
+};
+
+void register_java_lang_ref_FinalizerReference(JNIEnv* env) {
+ REGISTER_NATIVE_METHODS("java/lang/ref/FinalizerReference");
+}
+
+} // namespace art
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index 97ca6b2..9570bb5 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -21,6 +21,39 @@
namespace art {
+inline const OatQuickMethodHeader* OatFile::OatMethod::GetOatQuickMethodHeader() const {
+ const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ if (code == nullptr) {
+ return nullptr;
+ }
+ // Return a pointer to the packed struct before the code.
+ return reinterpret_cast<const OatQuickMethodHeader*>(code) - 1;
+}
+
+inline uint32_t OatFile::OatMethod::GetOatQuickMethodHeaderOffset() const {
+ const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader();
+ if (method_header == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const byte*>(method_header) - begin_;
+}
+
+inline uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
+ const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ if (code == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
+}
+
+inline uint32_t OatFile::OatMethod::GetQuickCodeSizeOffset() const {
+ const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader();
+ if (method_header == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const byte*>(&method_header->code_size_) - begin_;
+}
+
inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
@@ -50,11 +83,27 @@
return static_cast<uint32_t>(mapping_table != nullptr ? mapping_table - begin_ : 0u);
}
+inline uint32_t OatFile::OatMethod::GetMappingTableOffsetOffset() const {
+ const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader();
+ if (method_header == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const byte*>(&method_header->mapping_table_offset_) - begin_;
+}
+
inline uint32_t OatFile::OatMethod::GetVmapTableOffset() const {
const uint8_t* vmap_table = GetVmapTable();
return static_cast<uint32_t>(vmap_table != nullptr ? vmap_table - begin_ : 0u);
}
+inline uint32_t OatFile::OatMethod::GetVmapTableOffsetOffset() const {
+ const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader();
+ if (method_header == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const byte*>(&method_header->vmap_table_offset_) - begin_;
+}
+
inline const uint8_t* OatFile::OatMethod::GetMappingTable() const {
const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index d5142a7..1fdca2f 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -19,6 +19,7 @@
#include <dlfcn.h>
#include <sstream>
#include <string.h>
+#include <unistd.h>
#include "base/bit_vector.h"
#include "base/stl_util.h"
@@ -91,6 +92,10 @@
return nullptr;
}
ret.reset(OpenElfFile(file.get(), location, requested_base, false, executable, error_msg));
+
+ // It would be nice to unlink here. But we might have opened the file created by the
+ // ScopedLock, which we had better not delete to avoid races. TODO: Investigate how to fix the API
+ // to allow removal when we know the ELF must be borked.
}
return ret.release();
}
@@ -449,8 +454,12 @@
dex_file_location_checksum_, error_msg);
}
+uint32_t OatFile::OatDexFile::GetOatClassOffset(uint16_t class_def_index) const {
+ return oat_class_offsets_pointer_[class_def_index];
+}
+
OatFile::OatClass OatFile::OatDexFile::GetOatClass(uint16_t class_def_index) const {
- uint32_t oat_class_offset = oat_class_offsets_pointer_[class_def_index];
+ uint32_t oat_class_offset = GetOatClassOffset(class_def_index);
const byte* oat_class_pointer = oat_file_->Begin() + oat_class_offset;
CHECK_LT(oat_class_pointer, oat_file_->End()) << oat_file_->GetLocation();
@@ -523,32 +532,48 @@
}
}
-const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index) const {
+uint32_t OatFile::OatClass::GetOatMethodOffsetsOffset(uint32_t method_index) const {
+ const OatMethodOffsets* oat_method_offsets = GetOatMethodOffsets(method_index);
+ if (oat_method_offsets == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const uint8_t*>(oat_method_offsets) - oat_file_->Begin();
+}
+
+const OatMethodOffsets* OatFile::OatClass::GetOatMethodOffsets(uint32_t method_index) const {
// NOTE: We don't keep the number of methods and cannot do a bounds check for method_index.
- if (methods_pointer_ == NULL) {
+ if (methods_pointer_ == nullptr) {
CHECK_EQ(kOatClassNoneCompiled, type_);
- return OatMethod(NULL, 0, 0);
+ return nullptr;
}
size_t methods_pointer_index;
- if (bitmap_ == NULL) {
+ if (bitmap_ == nullptr) {
CHECK_EQ(kOatClassAllCompiled, type_);
methods_pointer_index = method_index;
} else {
CHECK_EQ(kOatClassSomeCompiled, type_);
if (!BitVector::IsBitSet(bitmap_, method_index)) {
- return OatMethod(NULL, 0, 0);
+ return nullptr;
}
size_t num_set_bits = BitVector::NumSetBits(bitmap_, method_index);
methods_pointer_index = num_set_bits;
}
const OatMethodOffsets& oat_method_offsets = methods_pointer_[methods_pointer_index];
+ return &oat_method_offsets;
+}
+
+const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index) const {
+ const OatMethodOffsets* oat_method_offsets = GetOatMethodOffsets(method_index);
+ if (oat_method_offsets == nullptr) {
+ return OatMethod(nullptr, 0, 0);
+ }
if (oat_file_->IsExecutable() ||
Runtime::Current() == nullptr || // This case applies for oatdump.
Runtime::Current()->IsCompiler()) {
return OatMethod(
oat_file_->Begin(),
- oat_method_offsets.code_offset_,
- oat_method_offsets.gc_map_offset_);
+ oat_method_offsets->code_offset_,
+ oat_method_offsets->gc_map_offset_);
} else {
// We aren't allowed to use the compiled code. We just force it down the interpreted version.
return OatMethod(oat_file_->Begin(), 0, 0);
@@ -565,17 +590,6 @@
OatFile::OatMethod::~OatMethod() {}
-
-uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
- uintptr_t code = reinterpret_cast<uintptr_t>(GetQuickCode());
- if (code == 0) {
- return 0;
- }
- // TODO: make this Thumb2 specific
- code &= ~0x1;
- return reinterpret_cast<uint32_t*>(code)[-1];
-}
-
void OatFile::OatMethod::LinkMethod(mirror::ArtMethod* method) const {
CHECK(method != NULL);
method->SetEntryPointFromPortableCompiledCode(GetPortableCode());
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index d8b48a9..e5cd6ec 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -114,13 +114,22 @@
}
}
+ // Returns 0.
uint32_t GetPortableCodeSize() const {
// TODO: With Quick, we store the size before the code. With Portable, the code is in a .o
// file we don't manage ourselves. ELF symbols do have a concept of size, so we could capture
// that and store it somewhere, such as the OatMethod.
return 0;
}
+
+ // Returns size of quick code.
uint32_t GetQuickCodeSize() const;
+ uint32_t GetQuickCodeSizeOffset() const;
+
+ // Returns OatQuickMethodHeader for debugging. Most callers should
+ // use more specific methods such as GetQuickCodeSize.
+ const OatQuickMethodHeader* GetOatQuickMethodHeader() const;
+ uint32_t GetOatQuickMethodHeaderOffset() const;
const uint8_t* GetNativeGcMap() const {
return GetOatPointer<const uint8_t*>(native_gc_map_offset_);
@@ -129,10 +138,14 @@
size_t GetFrameSizeInBytes() const;
uint32_t GetCoreSpillMask() const;
uint32_t GetFpSpillMask() const;
- uint32_t GetMappingTableOffset() const;
- uint32_t GetVmapTableOffset() const;
+
const uint8_t* GetMappingTable() const;
+ uint32_t GetMappingTableOffset() const;
+ uint32_t GetMappingTableOffsetOffset() const;
+
const uint8_t* GetVmapTable() const;
+ uint32_t GetVmapTableOffset() const;
+ uint32_t GetVmapTableOffsetOffset() const;
~OatMethod();
@@ -171,11 +184,21 @@
}
// Get the OatMethod entry based on its index into the class
- // defintion. direct methods come first, followed by virtual
- // methods. note that runtime created methods such as miranda
+ // defintion. Direct methods come first, followed by virtual
+ // methods. Note that runtime created methods such as miranda
// methods are not included.
const OatMethod GetOatMethod(uint32_t method_index) const;
+ // Return a pointer to the OatMethodOffsets for the requested
+ // method_index, or nullptr if none is present. Note that most
+ // callers should use GetOatMethod.
+ const OatMethodOffsets* GetOatMethodOffsets(uint32_t method_index) const;
+
+ // Return the offset from the start of the OatFile to the
+ // OatMethodOffsets for the requested method_index, or 0 if none
+ // is present. Note that most callers should use GetOatMethod.
+ uint32_t GetOatMethodOffsetsOffset(uint32_t method_index) const;
+
OatClass() {}
private:
@@ -229,6 +252,9 @@
// Returns the OatClass for the class specified by the given DexFile class_def_index.
OatClass GetOatClass(uint16_t class_def_index) const;
+ // Returns the offset to the OatClass information. Most callers should use GetOatClass.
+ uint32_t GetOatClassOffset(uint16_t class_def_index) const;
+
~OatDexFile();
private:
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 17de883d..60d641e 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -959,6 +959,7 @@
REGISTER(register_java_lang_System);
REGISTER(register_java_lang_Thread);
REGISTER(register_java_lang_VMClassLoader);
+ REGISTER(register_java_lang_ref_FinalizerReference);
REGISTER(register_java_lang_ref_Reference);
REGISTER(register_java_lang_reflect_Array);
REGISTER(register_java_lang_reflect_Constructor);
@@ -997,14 +998,14 @@
}
}
-void Runtime::SetStatsEnabled(bool new_state) {
+void Runtime::SetStatsEnabled(bool new_state, bool suspended) {
if (new_state == true) {
GetStats()->Clear(~0);
// TODO: wouldn't it make more sense to clear _all_ threads' stats?
Thread::Current()->GetStats()->Clear(~0);
- GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+ GetInstrumentation()->InstrumentQuickAllocEntryPoints(suspended);
} else {
- GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
+ GetInstrumentation()->UninstrumentQuickAllocEntryPoints(suspended);
}
stats_enabled_ = new_state;
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index a868e2c..96924ec 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -385,7 +385,7 @@
void ResetStats(int kinds);
- void SetStatsEnabled(bool new_state);
+ void SetStatsEnabled(bool new_state, bool suspended);
enum class NativeBridgeAction { // private
kUnload,
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 10688ff..9b37eb7 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -942,7 +942,6 @@
std::ostream& os;
const Thread* thread;
const bool can_allocate;
- mirror::ArtMethod* method;
mirror::ArtMethod* last_method;
int last_line_number;
int repetition_count;
@@ -993,7 +992,7 @@
// If we're currently in native code, dump that stack before dumping the managed stack.
if (dump_for_abort || ShouldShowNativeStack(this)) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
- DumpNativeStack(os, GetTid(), " native: ", GetCurrentMethod(nullptr));
+ DumpNativeStack(os, GetTid(), " native: ", GetCurrentMethod(nullptr, !dump_for_abort));
}
DumpJavaStack(os);
} else {
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index fc687dc..9a58942 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -833,13 +833,19 @@
// thread_suspend_count_lock_ so that the unregistering thread cannot be suspended.
// Note: deliberately not using MutexLock that could hold a stale self pointer.
Locks::thread_list_lock_->ExclusiveLock(self);
- CHECK(Contains(self));
- // Note: we don't take the thread_suspend_count_lock_ here as to be suspending a thread other
- // than yourself you need to hold the thread_list_lock_ (see Thread::ModifySuspendCount).
- if (!self->IsSuspended()) {
- list_.remove(self);
- delete self;
+ if (!Contains(self)) {
+ std::ostringstream os;
+ DumpNativeStack(os, GetTid(), " native: ", nullptr);
+ LOG(ERROR) << "Request to unregister unattached thread\n" << os.str();
self = nullptr;
+ } else {
+ // Note: we don't take the thread_suspend_count_lock_ here as to be suspending a thread other
+ // than yourself you need to hold the thread_list_lock_ (see Thread::ModifySuspendCount).
+ if (!self->IsSuspended()) {
+ list_.remove(self);
+ delete self;
+ self = nullptr;
+ }
}
Locks::thread_list_lock_->ExclusiveUnlock(self);
}
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 6dcc5fe..b32e042 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -373,11 +373,9 @@
// Enable count of allocs if specified in the flags.
if ((flags && kTraceCountAllocs) != 0) {
- runtime->SetStatsEnabled(true);
+ runtime->SetStatsEnabled(true, true);
}
-
-
if (sampling_enabled) {
CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, NULL, &RunSamplingThread,
reinterpret_cast<void*>(interval_us)),
@@ -492,7 +490,7 @@
size_t final_offset = cur_offset_.LoadRelaxed();
if ((flags_ & kTraceCountAllocs) != 0) {
- Runtime::Current()->SetStatsEnabled(false);
+ Runtime::Current()->SetStatsEnabled(false, true);
}
std::set<mirror::ArtMethod*> visited_methods;
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index c65b02f..1720e18 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -3323,8 +3323,8 @@
}
mirror::ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instruction* inst,
- bool is_range) {
- DCHECK(Runtime::Current()->IsStarted());
+ bool is_range) {
+ DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
mirror::ArtMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(),
is_range);
if (res_method == nullptr) {
@@ -3840,7 +3840,7 @@
void MethodVerifier::VerifyIGetQuick(const Instruction* inst, RegType& insn_type,
bool is_primitive) {
- DCHECK(Runtime::Current()->IsStarted());
+ DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
if (field == nullptr) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();
@@ -3900,7 +3900,7 @@
void MethodVerifier::VerifyIPutQuick(const Instruction* inst, RegType& insn_type,
bool is_primitive) {
- DCHECK(Runtime::Current()->IsStarted());
+ DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
if (field == nullptr) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();