Merge "Add CompiledCode for method and invoke stub." into ics-mr1-plus-art
diff --git a/src/compiled_method.cc b/src/compiled_method.cc
index 4b3cdee..b0285fd 100644
--- a/src/compiled_method.cc
+++ b/src/compiled_method.cc
@@ -18,6 +18,60 @@
namespace art {
+uint32_t CompiledCode::AlignCode(uint32_t offset) const {
+ return AlignCode(offset, instruction_set_);
+}
+
+uint32_t CompiledCode::AlignCode(uint32_t offset, InstructionSet instruction_set) {
+ switch (instruction_set) {
+ case kArm:
+ case kThumb2:
+ return RoundUp(offset, kArmAlignment);
+ case kMips:
+ return RoundUp(offset, kMipsAlignment);
+ case kX86:
+ return RoundUp(offset, kX86Alignment);
+ default:
+ LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
+ return 0;
+ }
+}
+
+size_t CompiledCode::CodeDelta() const {
+ switch (instruction_set_) {
+ case kArm:
+ case kMips:
+ case kX86:
+ return 0;
+ case kThumb2: {
+ // +1 to set the low-order bit so a BLX will switch to Thumb mode
+ return 1;
+ }
+ default:
+ LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
+ return 0;
+ }
+}
+
+const void* CompiledCode::CodePointer(const void* code_pointer,
+ InstructionSet instruction_set) {
+ switch (instruction_set) {
+ case kArm:
+ case kMips:
+ case kX86:
+ return code_pointer;
+ case kThumb2: {
+ uintptr_t address = reinterpret_cast<uintptr_t>(code_pointer);
+ // Set the low-order bit so a BLX will switch to Thumb mode
+ address |= 0x1;
+ return reinterpret_cast<const void*>(address);
+ }
+ default:
+ LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
+ return NULL;
+ }
+}
+
CompiledMethod::CompiledMethod(InstructionSet instruction_set,
const std::vector<uint8_t>& code,
const size_t frame_size_in_bytes,
@@ -25,9 +79,8 @@
const uint32_t fp_spill_mask,
const std::vector<uint32_t>& mapping_table,
const std::vector<uint16_t>& vmap_table)
- : instruction_set_(instruction_set), frame_size_in_bytes_(frame_size_in_bytes),
- core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
- elf_idx_(-1)
+ : CompiledCode(instruction_set), frame_size_in_bytes_(frame_size_in_bytes),
+ core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask)
{
CHECK_NE(code.size(), 0U);
DCHECK_EQ(vmap_table.size(),
@@ -54,7 +107,7 @@
DCHECK_EQ(vmap_table.size() + 1, length_prefixed_vmap_table.size());
DCHECK_EQ(vmap_table.size(), length_prefixed_vmap_table[0]);
- code_ = byte_code;
+ SetCode(byte_code);
mapping_table_ = length_prefixed_mapping_table;
vmap_table_ = length_prefixed_vmap_table;
DCHECK_EQ(vmap_table_[0], static_cast<uint32_t>(__builtin_popcount(core_spill_mask) + __builtin_popcount(fp_spill_mask)));
@@ -76,31 +129,13 @@
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask)
- : instruction_set_(instruction_set), code_(code), frame_size_in_bytes_(frame_size_in_bytes),
- core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
- elf_idx_(-1)
-{
- CHECK_NE(code.size(), 0U);
-}
-
-CompiledMethod::CompiledMethod(InstructionSet instruction_set,
- const uint16_t elf_idx,
- const uint16_t elf_func_idx)
- : instruction_set_(instruction_set), frame_size_in_bytes_(0),
- core_spill_mask_(0), fp_spill_mask_(0), elf_idx_(elf_idx),
- elf_func_idx_(elf_func_idx) {
+ : CompiledCode(instruction_set, code),
+ frame_size_in_bytes_(frame_size_in_bytes),
+ core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask) {
}
CompiledMethod::~CompiledMethod() {}
-InstructionSet CompiledMethod::GetInstructionSet() const {
- return instruction_set_;
-}
-
-const std::vector<uint8_t>& CompiledMethod::GetCode() const {
- return code_;
-}
-
size_t CompiledMethod::GetFrameSizeInBytes() const {
return frame_size_in_bytes_;
}
@@ -125,76 +160,15 @@
return gc_map_;
}
-uint32_t CompiledMethod::AlignCode(uint32_t offset) const {
- return AlignCode(offset, instruction_set_);
+CompiledInvokeStub::CompiledInvokeStub(InstructionSet instruction_set)
+ : CompiledCode(instruction_set) {
}
-uint32_t CompiledMethod::AlignCode(uint32_t offset, InstructionSet instruction_set) {
- switch (instruction_set) {
- case kArm:
- case kThumb2:
- return RoundUp(offset, kArmAlignment);
- case kMips:
- return RoundUp(offset, kMipsAlignment);
- case kX86:
- return RoundUp(offset, kX86Alignment);
- default:
- LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return 0;
- }
-}
-
-size_t CompiledMethod::CodeDelta() const {
- switch (instruction_set_) {
- case kArm:
- case kMips:
- case kX86:
- return 0;
- case kThumb2: {
- // +1 to set the low-order bit so a BLX will switch to Thumb mode
- return 1;
- }
- default:
- LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
- return 0;
- }
-}
-
-const void* CompiledMethod::CodePointer(const void* code_pointer,
- InstructionSet instruction_set) {
- switch (instruction_set) {
- case kArm:
- case kMips:
- case kX86:
- return code_pointer;
- case kThumb2: {
- uintptr_t address = reinterpret_cast<uintptr_t>(code_pointer);
- // Set the low-order bit so a BLX will switch to Thumb mode
- address |= 0x1;
- return reinterpret_cast<const void*>(address);
- }
- default:
- LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
- }
-}
-
-#if defined(ART_USE_LLVM_COMPILER)
-CompiledInvokeStub::CompiledInvokeStub(uint16_t elf_idx, uint16_t elf_func_idx)
- : elf_idx_(elf_idx), elf_func_idx_(elf_func_idx) {
-}
-#endif
-
-CompiledInvokeStub::CompiledInvokeStub(std::vector<uint8_t>& code)
- : elf_idx_(-1), elf_func_idx_(-1) {
- CHECK_NE(code.size(), 0U);
- code_ = code;
+CompiledInvokeStub::CompiledInvokeStub(InstructionSet instruction_set,
+ const std::vector<uint8_t>& code)
+ : CompiledCode(instruction_set, code) {
}
CompiledInvokeStub::~CompiledInvokeStub() {}
-const std::vector<uint8_t>& CompiledInvokeStub::GetCode() const {
- return code_;
-}
-
} // namespace art
diff --git a/src/compiled_method.h b/src/compiled_method.h
index 3987a5b..7ddde0c 100644
--- a/src/compiled_method.h
+++ b/src/compiled_method.h
@@ -29,7 +29,79 @@
namespace art {
-class CompiledMethod {
+class CompiledCode {
+ public:
+ CompiledCode(InstructionSet instruction_set)
+ : instruction_set_(instruction_set), elf_idx_(-1), elf_func_idx_(-1) {
+ }
+
+ CompiledCode(InstructionSet instruction_set, const std::vector<uint8_t>& code)
+ : instruction_set_(instruction_set), code_(code), elf_idx_(-1),
+ elf_func_idx_(-1) {
+ CHECK_NE(code.size(), 0U);
+ }
+
+ CompiledCode(InstructionSet instruction_set,
+ uint16_t elf_idx,
+ uint16_t elf_func_idx)
+ : instruction_set_(instruction_set), elf_idx_(elf_idx),
+ elf_func_idx_(elf_func_idx) {
+ }
+
+ InstructionSet GetInstructionSet() const {
+ return instruction_set_;
+ }
+
+ const std::vector<uint8_t>& GetCode() const {
+ return code_;
+ }
+
+ void SetCode(const std::vector<uint8_t>& code) {
+ code_ = code;
+ }
+
+ bool operator==(const CompiledCode& rhs) const {
+ return (code_ == rhs.code_);
+ }
+
+ bool IsExecutableInElf() const {
+ return (elf_idx_ != static_cast<uint16_t>(-1u));
+ }
+
+ uint16_t GetElfIndex() const {
+ return elf_idx_;
+ }
+
+ uint16_t GetElfFuncIndex() const {
+ return elf_func_idx_;
+ }
+
+ // To align an offset from a page-aligned value to make it suitable
+ // for code storage. For example on ARM, to ensure that PC relative
+ // value computations work out as expected.
+ uint32_t AlignCode(uint32_t offset) const;
+ static uint32_t AlignCode(uint32_t offset, InstructionSet instruction_set);
+
+ // Returns the difference between the code address and a usable PC.
+ // Mainly to cope with kThumb2 where the lower bit must be set.
+ size_t CodeDelta() const;
+
+ // Returns a pointer suitable for invoking the code at the argument
+ // code_pointer address. Mainly to cope with kThumb2 where the
+ // lower bit must be set to indicate Thumb mode.
+ static const void* CodePointer(const void* code_pointer,
+ InstructionSet instruction_set);
+
+ private:
+ const InstructionSet instruction_set_;
+ std::vector<uint8_t> code_;
+
+ // LLVM-specific fields
+ uint16_t elf_idx_;
+ uint16_t elf_func_idx_;
+};
+
+class CompiledMethod : public CompiledCode {
public:
// Constructs a CompiledMethod for the non-LLVM compilers.
CompiledMethod(InstructionSet instruction_set,
@@ -52,13 +124,15 @@
// Constructs a CompiledMethod for the LLVM compiler.
CompiledMethod(InstructionSet instruction_set,
- const uint16_t elf_idx,
- const uint16_t elf_func_idx);
+ uint16_t elf_idx,
+ uint16_t elf_func_idx)
+ : CompiledCode(instruction_set, elf_idx, elf_func_idx),
+ frame_size_in_bytes_(kStackAlignment), core_spill_mask_(0),
+ fp_spill_mask_(0) {
+ }
~CompiledMethod();
- InstructionSet GetInstructionSet() const;
- const std::vector<uint8_t>& GetCode() const;
size_t GetFrameSizeInBytes() const;
uint32_t GetCoreSpillMask() const;
uint32_t GetFpSpillMask() const;
@@ -72,79 +146,29 @@
}
#endif
- // Aligns an offset from a page aligned value to make it suitable
- // for code storage. important to ensure that PC relative value
- // computations work out as expected on ARM.
- uint32_t AlignCode(uint32_t offset) const;
- static uint32_t AlignCode(uint32_t offset, InstructionSet instruction_set);
-
- // returns the difference between the code address and a usable PC.
- // mainly to cope with kThumb2 where the lower bit must be set.
- size_t CodeDelta() const;
-
- // Returns a pointer suitable for invoking the code at the argument
- // code_pointer address. Mainly to cope with kThumb2 where the
- // lower bit must be set to indicate Thumb mode.
- static const void* CodePointer(const void* code_pointer,
- InstructionSet instruction_set);
-
- uint16_t GetElfIndex() const {
- DCHECK(IsExecutableInElf());
- return elf_idx_;
- }
-
- uint16_t GetElfFuncIndex() const {
- DCHECK(IsExecutableInElf());
- return elf_func_idx_;
- }
-
- bool IsExecutableInElf() const {
- return (elf_idx_ != static_cast<uint16_t>(-1u));
- }
-
private:
- // For non-LLVM
- const InstructionSet instruction_set_;
- std::vector<uint8_t> code_;
size_t frame_size_in_bytes_;
const uint32_t core_spill_mask_;
const uint32_t fp_spill_mask_;
std::vector<uint32_t> mapping_table_;
std::vector<uint16_t> vmap_table_;
std::vector<uint8_t> gc_map_;
- // For LLVM
- uint16_t elf_idx_;
- uint16_t elf_func_idx_;
};
-class CompiledInvokeStub {
+class CompiledInvokeStub : public CompiledCode {
public:
- explicit CompiledInvokeStub(std::vector<uint8_t>& code);
-#if defined(ART_USE_LLVM_COMPILER)
- explicit CompiledInvokeStub(uint16_t elf_idx, uint16_t elf_func_idx);
-#endif
+ explicit CompiledInvokeStub(InstructionSet instruction_set);
+
+ explicit CompiledInvokeStub(InstructionSet instruction_set,
+ const std::vector<uint8_t>& code);
+
+ explicit CompiledInvokeStub(InstructionSet instruction_set,
+ uint16_t elf_idx,
+ uint16_t elf_func_idx)
+ : CompiledCode(instruction_set, elf_idx, elf_func_idx) {
+ }
+
~CompiledInvokeStub();
-
- const std::vector<uint8_t>& GetCode() const;
-
- uint16_t GetElfIndex() const {
- DCHECK(IsExecutableInElf());
- return elf_idx_;
- }
-
- uint16_t GetElfFuncIndex() const {
- DCHECK(IsExecutableInElf());
- return elf_func_idx_;
- }
-
- bool IsExecutableInElf() const {
- return (elf_idx_ != static_cast<uint16_t>(-1u));
- }
-
- private:
- std::vector<uint8_t> code_;
- uint16_t elf_idx_;
- uint16_t elf_func_idx_;
};
} // namespace art
diff --git a/src/compiler_llvm/stub_compiler.cc b/src/compiler_llvm/stub_compiler.cc
index 991dbe6..0d64f0d 100644
--- a/src/compiler_llvm/stub_compiler.cc
+++ b/src/compiler_llvm/stub_compiler.cc
@@ -193,7 +193,8 @@
// store ret_addr, and ret_void. Beside, we guess that we have to use
// 50 bytes to represent one LLVM instruction.
- return new CompiledInvokeStub(cunit_->GetElfIndex(), elf_func_idx);
+ return new CompiledInvokeStub(cunit_->GetInstructionSet(),
+ cunit_->GetElfIndex(), elf_func_idx);
}
@@ -268,7 +269,8 @@
// Add the memory usage approximation of the compilation unit
cunit_->AddMemUsageApproximation((shorty_size + 2) * 50);
- return new CompiledInvokeStub(cunit_->GetElfIndex(), elf_func_idx);
+ return new CompiledInvokeStub(cunit_->GetInstructionSet(),
+ cunit_->GetElfIndex(), elf_func_idx);
}
diff --git a/src/oat/jni/arm/jni_internal_arm.cc b/src/oat/jni/arm/jni_internal_arm.cc
index 78c3903..bbbe6ad 100644
--- a/src/oat/jni/arm/jni_internal_arm.cc
+++ b/src/oat/jni/arm/jni_internal_arm.cc
@@ -151,7 +151,7 @@
std::vector<uint8_t> code(assembler->CodeSize());
MemoryRegion region(&code[0], code.size());
assembler->FinalizeInstructions(region);
- return new CompiledInvokeStub(code);
+ return new CompiledInvokeStub(kArm, code);
#undef __
}
diff --git a/src/oat/jni/x86/jni_internal_x86.cc b/src/oat/jni/x86/jni_internal_x86.cc
index 6abeb49..498ca43 100644
--- a/src/oat/jni/x86/jni_internal_x86.cc
+++ b/src/oat/jni/x86/jni_internal_x86.cc
@@ -156,7 +156,7 @@
std::vector<uint8_t> code(assembler->CodeSize());
MemoryRegion region(&code[0], code.size());
assembler->FinalizeInstructions(region);
- return new CompiledInvokeStub(code);
+ return new CompiledInvokeStub(kX86, code);
#undef __
}