/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "code_generator_arm.h"
#include "arch/arm/instruction_set_features_arm.h"
#include "art_method.h"
#include "code_generator_utils.h"
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_arm.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "thread.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"
namespace art {
template<class MirrorType>
class GcRoot;
namespace arm {
static bool ExpectedPairLayout(Location location) {
// We expect this for both core and fpu register pairs.
return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
}
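// The ArtMethod* of the current method is always at the bottom of the frame (SP + 0);
// see the store in GenerateFrameEntry below.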
static constexpr int kCurrentMethodStackOffset = 0;
static constexpr Register kMethodRegisterArgument = R0;
static constexpr Register kCoreAlwaysSpillRegister = R5;
static constexpr Register kCoreCalleeSaves[] =
{ R5, R6, R7, R8, R10, R11, LR };
static constexpr SRegister kFpuCalleeSaves[] =
{ S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };
// D31 cannot be split into two S registers, and the register allocator only works on
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;
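// Threshold above which a HPackedSwitch should not be lowered to a chain of
// compare-and-branch instructions (a table-based branch is used instead).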
static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
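// In the slow path classes below, `__` emits through the assembler of the
// `codegen` argument passed to EmitNativeCode.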
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
class NullCheckSlowPathARM : public SlowPathCode {
public:
explicit NullCheckSlowPathARM(HNullCheck* instruction) : SlowPathCode(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
bool IsFatal() const OVERRIDE { return true; }
const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM"; }
private:
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};
class DivZeroCheckSlowPathARM : public SlowPathCode {
public:
explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
bool IsFatal() const OVERRIDE { return true; }
const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM"; }
private:
DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
};
class SuspendCheckSlowPathARM : public SlowPathCode {
public:
SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
: SlowPathCode(instruction), successor_(successor) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ b(GetReturnLabel());
} else {
__ b(arm_codegen->GetLabelOf(successor_));
}
}
Label* GetReturnLabel() {
DCHECK(successor_ == nullptr);
return &return_label_;
}
HBasicBlock* GetSuccessor() const {
return successor_;
}
const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM"; }
private:
// If not null, the block to branch to after the suspend check.
HBasicBlock* const successor_;
// If `successor_` is null, the label to branch to after the suspend check.
Label return_label_;
DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
};
class BoundsCheckSlowPathARM : public SlowPathCode {
public:
explicit BoundsCheckSlowPathARM(HBoundsCheck* instruction)
: SlowPathCode(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
if (instruction_->CanThrowIntoCatchBlock()) {
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
codegen->EmitParallelMoves(
locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimInt,
locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
bool IsFatal() const OVERRIDE { return true; }
const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM"; }
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};
class LoadClassSlowPathARM : public SlowPathCode {
public:
LoadClassSlowPathARM(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
: SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = at_->GetLocations();
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
__ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
int32_t entry_point_offset = do_clinit_
? QUICK_ENTRY_POINT(pInitializeStaticStorage)
: QUICK_ENTRY_POINT(pInitializeType);
arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
// Move the class to the desired location.
Location out = locations->Out();
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
}
RestoreLiveRegisters(codegen, locations);
__ b(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM"; }
private:
// The class this slow path will load.
HLoadClass* const cls_;
// The instruction at which this slow path is invoked:
// either the load class itself or an initialization check.
HInstruction* const at_;
// The dex PC of `at_`.
const uint32_t dex_pc_;
// Whether to initialize the class.
const bool do_clinit_;
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};
class LoadStringSlowPathARM : public SlowPathCode {
public:
explicit LoadStringSlowPathARM(HLoadString* instruction) : SlowPathCode(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
__ LoadImmediate(calling_convention.GetRegisterAt(0), string_index);
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
RestoreLiveRegisters(codegen, locations);
__ b(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM"; }
private:
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
};
class TypeCheckSlowPathARM : public SlowPathCode {
public:
TypeCheckSlowPathARM(HInstruction* instruction, bool is_fatal)
: SlowPathCode(instruction), is_fatal_(is_fatal) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
: locations->Out();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
if (!is_fatal_) {
SaveLiveRegisters(codegen, locations);
}
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
codegen->EmitParallelMoves(
locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
object_class,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<
kQuickInstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*>();
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
} else {
DCHECK(instruction_->IsCheckCast());
arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
}
if (!is_fatal_) {
RestoreLiveRegisters(codegen, locations);
__ b(GetExitLabel());
}
}
const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM"; }
bool IsFatal() const OVERRIDE { return is_fatal_; }
private:
const bool is_fatal_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
};
class DeoptimizationSlowPathARM : public SlowPathCode {
public:
explicit DeoptimizationSlowPathARM(HDeoptimize* instruction)
: SlowPathCode(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM"; }
private:
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
};
class ArraySetSlowPathARM : public SlowPathCode {
public:
explicit ArraySetSlowPathARM(HInstruction* instruction) : SlowPathCode(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
HParallelMove parallel_move(codegen->GetGraph()->GetArena());
parallel_move.AddMove(
locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
nullptr);
parallel_move.AddMove(
locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt,
nullptr);
parallel_move.AddMove(
locations->InAt(2),
Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
Primitive::kPrimNot,
nullptr);
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
RestoreLiveRegisters(codegen, locations);
__ b(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM"; }
private:
DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM);
};
// Slow path marking an object during a read barrier.
class ReadBarrierMarkSlowPathARM : public SlowPathCode {
public:
ReadBarrierMarkSlowPathARM(HInstruction* instruction, Location out, Location obj)
: SlowPathCode(instruction), out_(out), obj_(obj) {
DCHECK(kEmitCompilerReadBarrier);
}
const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathARM"; }
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
DCHECK(instruction_->IsInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
instruction_->IsLoadClass() ||
instruction_->IsLoadString() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast())
<< "Unexpected instruction in read barrier marking slow path: "
<< instruction_->DebugName();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
arm_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), obj_);
arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierMark),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickReadBarrierMark, mirror::Object*, mirror::Object*>();
arm_codegen->Move32(out_, Location::RegisterLocation(R0));
RestoreLiveRegisters(codegen, locations);
__ b(GetExitLabel());
}
private:
const Location out_;
const Location obj_;
DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathARM);
};
// Slow path generating a read barrier for a heap reference.
class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode {
public:
ReadBarrierForHeapReferenceSlowPathARM(HInstruction* instruction,
Location out,
Location ref,
Location obj,
uint32_t offset,
Location index)
: SlowPathCode(instruction),
out_(out),
ref_(ref),
obj_(obj),
offset_(offset),
index_(index) {
DCHECK(kEmitCompilerReadBarrier);
// If `obj` is equal to `out` or `ref`, it means the initial object
// has been overwritten by (or after) the heap object reference load
// to be instrumented, e.g.:
//
// __ LoadFromOffset(kLoadWord, out, out, offset);
// codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset);
//
// In that case, we have lost the information about the original
// object, and the emitted read barrier cannot work properly.
DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out;
DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
DCHECK(!instruction_->IsInvoke() ||
(instruction_->IsInvokeStaticOrDirect() &&
instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier for heap reference slow path: "
<< instruction_->DebugName();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
// We may have to change the index's value, but as `index_` is a
// constant member (like the other "inputs" of this slow path),
// we introduce a copy of it, `index`.
Location index = index_;
if (index_.IsValid()) {
// Handle `index_` for HArrayGet and intrinsic UnsafeGetObject.
if (instruction_->IsArrayGet()) {
// Compute the actual memory offset and store it in `index`.
Register index_reg = index_.AsRegister<Register>();
DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg));
if (codegen->IsCoreCalleeSaveRegister(index_reg)) {
// We are about to change the value of `index_reg` (see the
// calls to art::arm::Thumb2Assembler::Lsl and
// art::arm::Thumb2Assembler::AddConstant below), but it has
// not been saved by the previous call to
// art::SlowPathCode::SaveLiveRegisters, as it is a
// callee-save register --
// art::SlowPathCode::SaveLiveRegisters does not consider
// callee-save registers, as it has been designed with the
// assumption that callee-save registers are supposed to be
// handled by the called function. So, as a callee-save
// register, `index_reg` _would_ eventually be saved onto
// the stack, but it would be too late: we would have
// changed its value earlier. Therefore, we manually save
// it here into another freely available register,
// `free_reg`, chosen of course among the caller-save
// registers (as a callee-save `free_reg` register would
// exhibit the same problem).
//
// Note we could have requested a temporary register from
// the register allocator instead; but we prefer not to, as
// this is a slow path, and we know we can find a
// caller-save register that is available.
Register free_reg = FindAvailableCallerSaveRegister(codegen);
__ Mov(free_reg, index_reg);
index_reg = free_reg;
index = Location::RegisterLocation(index_reg);
} else {
// The initial register stored in `index_` has already been
// saved in the call to art::SlowPathCode::SaveLiveRegisters
// (as it is not a callee-save register), so we can freely
// use it.
}
// Shifting the index value contained in `index_reg` by the scale
// factor (2) cannot overflow in practice, as the runtime is
// unable to allocate object arrays with a size larger than
// 2^26 - 1 (that is, 2^28 - 4 bytes).
__ Lsl(index_reg, index_reg, TIMES_4);
static_assert(
sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
__ AddConstant(index_reg, index_reg, offset_);
} else {
DCHECK(instruction_->IsInvoke());
DCHECK(instruction_->GetLocations()->Intrinsified());
DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
(instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
<< instruction_->AsInvoke()->GetIntrinsic();
DCHECK_EQ(offset_, 0U);
DCHECK(index_.IsRegisterPair());
// UnsafeGet's offset location is a register pair; the low
// part contains the correct offset.
index = index_.ToLow();
}
}
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
HParallelMove parallel_move(codegen->GetGraph()->GetArena());
parallel_move.AddMove(ref_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
nullptr);
parallel_move.AddMove(obj_,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot,
nullptr);
if (index.IsValid()) {
parallel_move.AddMove(index,
Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
Primitive::kPrimInt,
nullptr);
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
} else {
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
__ LoadImmediate(calling_convention.GetRegisterAt(2), offset_);
}
arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierSlow),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<
kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
arm_codegen->Move32(out_, Location::RegisterLocation(R0));
RestoreLiveRegisters(codegen, locations);
__ b(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathARM"; }
private:
Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
size_t ref = static_cast<size_t>(ref_.AsRegister<Register>());
size_t obj = static_cast<size_t>(obj_.AsRegister<Register>());
for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
if (i != ref && i != obj && !codegen->IsCoreCalleeSaveRegister(i)) {
return static_cast<Register>(i);
}
}
// We shall never fail to find a free caller-save register, as
// there are more than two core caller-save registers on ARM
// (meaning it is possible to find one which is different from
// `ref` and `obj`).
DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u);
LOG(FATAL) << "Could not find a free caller-save register";
UNREACHABLE();
}
const Location out_;
const Location ref_;
const Location obj_;
const uint32_t offset_;
// An additional location containing an index to an array.
// Only used for HArrayGet and the UnsafeGetObject &
// UnsafeGetObjectVolatile intrinsics.
const Location index_;
DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathARM);
};
// Slow path generating a read barrier for a GC root.
class ReadBarrierForRootSlowPathARM : public SlowPathCode {
public:
ReadBarrierForRootSlowPathARM(HInstruction* instruction, Location out, Location root)
: SlowPathCode(instruction), out_(out), root_(root) {
DCHECK(kEmitCompilerReadBarrier);
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
<< "Unexpected instruction in read barrier for GC root slow path: "
<< instruction_->DebugName();
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
arm_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), root_);
arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierForRootSlow),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*>();
arm_codegen->Move32(out_, Location::RegisterLocation(R0));
RestoreLiveRegisters(codegen, locations);
__ b(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARM"; }
private:
const Location out_;
const Location root_;
DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathARM);
};
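// From this point on, `__` binds to the code generator's own assembler instead of
// the one passed to a slow path.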
#undef __
#define __ down_cast<ArmAssembler*>(GetAssembler())->
inline Condition ARMCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return EQ;
case kCondNE: return NE;
case kCondLT: return LT;
case kCondLE: return LE;
case kCondGT: return GT;
case kCondGE: return GE;
case kCondB: return LO;
case kCondBE: return LS;
case kCondA: return HI;
case kCondAE: return HS;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
// Maps signed condition to unsigned condition.
inline Condition ARMUnsignedCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return EQ;
case kCondNE: return NE;
// Signed to unsigned.
case kCondLT: return LO;
case kCondLE: return LS;
case kCondGT: return HI;
case kCondGE: return HS;
// Unsigned conditions remain unchanged.
case kCondB: return LO;
case kCondBE: return LS;
case kCondA: return HI;
case kCondAE: return HS;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
inline Condition ARMFPCondition(IfCondition cond, bool gt_bias) {
// The ARM condition codes can express all the necessary branches; see the
// "Meaning (floating-point)" column in Table A8-1 of the ARMv7 reference manual.
// There is no dex instruction or HIR that would need the missing conditions
// "equal or unordered" or "not equal".
switch (cond) {
case kCondEQ: return EQ;
case kCondNE: return NE /* unordered */;
case kCondLT: return gt_bias ? CC : LT /* unordered */;
case kCondLE: return gt_bias ? LS : LE /* unordered */;
case kCondGT: return gt_bias ? HI /* unordered */ : GT;
case kCondGE: return gt_bias ? CS /* unordered */ : GE;
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
}
void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
stream << Register(reg);
}
void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
stream << SRegister(reg);
}
size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
__ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
return kArmWordSize;
}
size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
__ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
return kArmWordSize;
}
size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
__ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index);
return kArmWordSize;
}
size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
__ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index);
return kArmWordSize;
}
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
const ArmInstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats)
: CodeGenerator(graph,
kNumberOfCoreRegisters,
kNumberOfSRegisters,
kNumberOfRegisterPairs,
ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
arraysize(kCoreCalleeSaves)),
ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
arraysize(kFpuCalleeSaves)),
compiler_options,
stats),
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
assembler_(),
isa_features_(isa_features),
method_patches_(MethodReferenceComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
call_patches_(MethodReferenceComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
dex_cache_arrays_base_labels_(std::less<HArmDexCacheArraysBase*>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Always save the LR register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(LR));
}
void CodeGeneratorARM::Finalize(CodeAllocator* allocator) {
// Ensure that we fix up branches and literal loads and emit the literal pool.
__ FinalizeCode();
// Adjust native pc offsets in stack maps.
for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
uint32_t new_position = __ GetAdjustedPosition(old_position);
stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
}
// Adjust pc offsets for the disassembly information.
if (disasm_info_ != nullptr) {
GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
for (auto& it : *disasm_info_->GetInstructionIntervals()) {
it.second.start = __ GetAdjustedPosition(it.second.start);
it.second.end = __ GetAdjustedPosition(it.second.end);
}
for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
}
}
CodeGenerator::Finalize(allocator);
}
void CodeGeneratorARM::SetupBlockedRegisters() const {
// Don't allocate the R1_R2 pair, which is used for Dalvik-style argument passing.
blocked_register_pairs_[R1_R2] = true;
// Stack register, LR and PC are always reserved.
blocked_core_registers_[SP] = true;
blocked_core_registers_[LR] = true;
blocked_core_registers_[PC] = true;
// Reserve thread register.
blocked_core_registers_[TR] = true;
// Reserve temp register.
blocked_core_registers_[IP] = true;
if (GetGraph()->IsDebuggable()) {
// Stubs do not save callee-save floating point registers. If the graph
// is debuggable, we need to deal with these registers differently. For
// now, just block them.
for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
}
}
UpdateBlockedPairRegisters();
}
void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
for (int i = 0; i < kNumberOfRegisterPairs; i++) {
ArmManagedRegister current =
ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
if (blocked_core_registers_[current.AsRegisterPairLow()]
|| blocked_core_registers_[current.AsRegisterPairHigh()]) {
blocked_register_pairs_[i] = true;
}
}
}
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
: InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
void CodeGeneratorARM::ComputeSpillMask() {
core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
// There is no easy instruction to restore just the PC on thumb2. We spill and
// restore another arbitrary register.
core_spill_mask_ |= (1 << kCoreAlwaysSpillRegister);
fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
// We use vpush and vpop for saving and restoring floating point registers, which take
// an SRegister and the number of registers to save/restore after that SRegister. We
// therefore update the `fpu_spill_mask_` to also contain the registers in that range
// that were not allocated.
if (fpu_spill_mask_ != 0) {
uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
fpu_spill_mask_ |= (1 << i);
}
}
}
static dwarf::Reg DWARFReg(Register reg) {
return dwarf::Reg::ArmCore(static_cast<int>(reg));
}
static dwarf::Reg DWARFReg(SRegister reg) {
return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
void CodeGeneratorARM::GenerateFrameEntry() {
bool skip_overflow_check =
IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
__ Bind(&frame_entry_label_);
if (HasEmptyFrame()) {
return;
}
if (!skip_overflow_check) {
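// Implicit stack overflow check: probe one word at the far end of the reserved
// stack region. If this hits the protected guard page, the fault handler turns
// the signal into a StackOverflowError at the recorded pc.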
__ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
__ LoadFromOffset(kLoadWord, IP, IP, 0);
RecordPcInfo(nullptr, 0);
}
__ PushList(core_spill_mask_);
__ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(core_spill_mask_));
__ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, core_spill_mask_, kArmWordSize);
if (fpu_spill_mask_ != 0) {
SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
__ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
__ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
__ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
}
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ AddConstant(SP, -adjust);
__ cfi().AdjustCFAOffset(adjust);
__ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}
void CodeGeneratorARM::GenerateFrameExit() {
if (HasEmptyFrame()) {
__ bx(LR);
return;
}
__ cfi().RememberState();
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ AddConstant(SP, adjust);
__ cfi().AdjustCFAOffset(-adjust);
if (fpu_spill_mask_ != 0) {
SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
__ vpops(start_register, POPCOUNT(fpu_spill_mask_));
__ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
__ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
}
// Pop LR into PC to return.
DCHECK_NE(core_spill_mask_ & (1 << LR), 0U);
uint32_t pop_mask = (core_spill_mask_ & (~(1 << LR))) | 1 << PC;
__ PopList(pop_mask);
__ cfi().RestoreState();
__ cfi().DefCFAOffset(GetFrameSize());
}
void CodeGeneratorARM::Bind(HBasicBlock* block) {
Label* label = GetLabelOf(block);
__ BindTrackedLabel(label);
}
Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
switch (load->GetType()) {
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimFloat:
return Location::StackSlot(GetStackSlot(load->GetLocal()));
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
case Primitive::kPrimChar:
case Primitive::kPrimShort:
case Primitive::kPrimVoid:
LOG(FATAL) << "Unexpected type " << load->GetType();
UNREACHABLE();
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
case Primitive::kPrimChar:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
uint32_t index = gp_index_++;
uint32_t stack_index = stack_index_++;
if (index < calling_convention.GetNumberOfRegisters()) {
return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
} else {
return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
}
}
case Primitive::kPrimLong: {
uint32_t index = gp_index_;
uint32_t stack_index = stack_index_;
gp_index_ += 2;
stack_index_ += 2;
if (index + 1 < calling_convention.GetNumberOfRegisters()) {
if (calling_convention.GetRegisterAt(index) == R1) {
// Skip R1, and use R2_R3 instead.
gp_index_++;
index++;
}
}
if (index + 1 < calling_convention.GetNumberOfRegisters()) {
DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
calling_convention.GetRegisterAt(index + 1));
return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
calling_convention.GetRegisterAt(index + 1));
} else {
return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
}
}
case Primitive::kPrimFloat: {
uint32_t stack_index = stack_index_++;
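// Hard-float calling convention: single-precision arguments may back-fill odd
// S registers left free by double-precision allocation. An even float index has
// no back-fill gap to use, so catch up with the double allocation first.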
if (float_index_ % 2 == 0) {
float_index_ = std::max(double_index_, float_index_);
}
if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
} else {
return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
}
}
case Primitive::kPrimDouble: {
double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
uint32_t stack_index = stack_index_;
stack_index_ += 2;
if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
uint32_t index = double_index_;
double_index_ += 2;
Location result = Location::FpuRegisterPairLocation(
calling_convention.GetFpuRegisterAt(index),
calling_convention.GetFpuRegisterAt(index + 1));
DCHECK(ExpectedPairLayout(result));
return result;
} else {
return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
}
}
case Primitive::kPrimVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
}
return Location::NoLocation();
}
Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) const {
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
case Primitive::kPrimChar:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
return Location::RegisterLocation(R0);
}
case Primitive::kPrimFloat: {
return Location::FpuRegisterLocation(S0);
}
case Primitive::kPrimLong: {
return Location::RegisterPairLocation(R0, R1);
}
case Primitive::kPrimDouble: {
return Location::FpuRegisterPairLocation(S0, S1);
}
case Primitive::kPrimVoid:
return Location::NoLocation();
}
UNREACHABLE();
}
Location InvokeDexCallingConventionVisitorARM::GetMethodLocation() const {
return Location::RegisterLocation(kMethodRegisterArgument);
}
void CodeGeneratorARM::Move32(Location destination, Location source) {
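// Moves a 32-bit value between any two of: core register, S register or stack
// slot. Stack-to-stack moves go through the scratch register IP.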
if (source.Equals(destination)) {
return;
}
if (destination.IsRegister()) {
if (source.IsRegister()) {
__ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
} else if (source.IsFpuRegister()) {
__ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
} else {
__ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
}
} else if (destination.IsFpuRegister()) {
if (source.IsRegister()) {
__ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
} else if (source.IsFpuRegister()) {
__ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
} else {
__ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
}
} else {
DCHECK(destination.IsStackSlot()) << destination;
if (source.IsRegister()) {
__ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
} else if (source.IsFpuRegister()) {
__ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
} else {
DCHECK(source.IsStackSlot()) << source;
__ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
__ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
}
}
}
void CodeGeneratorARM::Move64(Location destination, Location source) {
if (source.Equals(destination)) {
return;
}
if (destination.IsRegisterPair()) {
if (source.IsRegisterPair()) {
EmitParallelMoves(
Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
Primitive::kPrimInt,
Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
Primitive::kPrimInt);
} else if (source.IsFpuRegister()) {
UNIMPLEMENTED(FATAL);
} else if (source.IsFpuRegisterPair()) {
__ vmovrrd(destination.AsRegisterPairLow<Register>(),
destination.AsRegisterPairHigh<Register>(),
FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
} else {
DCHECK(source.IsDoubleStackSlot());
DCHECK(ExpectedPairLayout(destination));
__ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
SP, source.GetStackIndex());
}
} else if (destination.IsFpuRegisterPair()) {
if (source.IsDoubleStackSlot()) {
__ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
SP,
source.GetStackIndex());
} else if (source.IsRegisterPair()) {
__ vmovdrr(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
source.AsRegisterPairLow<Register>(),
source.AsRegisterPairHigh<Register>());
} else {
UNIMPLEMENTED(FATAL);
}
} else {
DCHECK(destination.IsDoubleStackSlot());
if (source.IsRegisterPair()) {
// No conflict possible, so just do the moves.
if (source.AsRegisterPairLow<Register>() == R1) {
DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
__ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
__ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
} else {
__ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
SP, destination.GetStackIndex());
}
} else if (source.IsFpuRegisterPair()) {
__ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
SP,
destination.GetStackIndex());
} else {
DCHECK(source.IsDoubleStackSlot());
EmitParallelMoves(
Location::StackSlot(source.GetStackIndex()),
Location::StackSlot(destination.GetStackIndex()),
Primitive::kPrimInt,
Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
Primitive::kPrimInt);
}
}
}
void CodeGeneratorARM::MoveConstant(Location location, int32_t value) {
DCHECK(location.IsRegister());
__ LoadImmediate(location.AsRegister<Register>(), value);
}
void CodeGeneratorARM::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
HParallelMove move(GetGraph()->GetArena());
move.AddMove(src, dst, dst_type, nullptr);
GetMoveResolver()->EmitNativeCode(&move);
}
void CodeGeneratorARM::AddLocationAsTemp(Location location, LocationSummary* locations) {
if (location.IsRegister()) {
locations->AddTemp(location);
} else if (location.IsRegisterPair()) {
locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
} else {
UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
}
}
void CodeGeneratorARM::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
InvokeRuntime(GetThreadOffset<kArmWordSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
}
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
ValidateInvokeRuntime(instruction, slow_path);
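// Entrypoints live at fixed offsets in the Thread object (TR); load the target
// address from there and call through LR.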
__ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
__ blx(LR);
RecordPcInfo(instruction, dex_pc, slow_path);
}
void InstructionCodeGeneratorARM::HandleGoto(HInstruction* got, HBasicBlock* successor) {
DCHECK(!successor->IsExitBlock());
HBasicBlock* block = got->GetBlock();
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
}
if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
__ b(codegen_->GetLabelOf(successor));
}
}
void LocationsBuilderARM::VisitGoto(HGoto* got) {
got->SetLocations(nullptr);
}
void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
HandleGoto(got, got->GetSuccessor());
}
void LocationsBuilderARM::VisitTryBoundary(HTryBoundary* try_boundary) {
try_boundary->SetLocations(nullptr);
}
void InstructionCodeGeneratorARM::VisitTryBoundary(HTryBoundary* try_boundary) {
HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
if (!successor->IsExitBlock()) {
HandleGoto(try_boundary, successor);
}
}
void LocationsBuilderARM::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
Label* true_label,
Label* false_label ATTRIBUTE_UNUSED) {
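// Only the branch to `true_label` is emitted here; handling the false case
// (fallthrough or an explicit branch) is left to the caller.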
__ vmstat(); // Transfer the FP status register to the ARM APSR.
__ b(true_label, ARMFPCondition(cond->GetCondition(), cond->IsGtBias()));
}
void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
Label* true_label,
Label* false_label) {
LocationSummary* locations = cond->GetLocations();
Location left = locations->InAt(0);
Location right = locations->InAt(1);
IfCondition if_cond = cond->GetCondition();
Register left_high = left.AsRegisterPairHigh<Register>();
Register left_low = left.AsRegisterPairLow<Register>();
IfCondition true_high_cond = if_cond;
IfCondition false_high_cond = cond->GetOppositeCondition();
Condition final_condition = ARMUnsignedCondition(if_cond); // unsigned on lower part
// Set the conditions for the test, remembering that == needs to be
// decided using the low words.
// TODO: consider avoiding jumps with temporary and CMP low+SBC high
switch (if_cond) {
case kCondEQ:
case kCondNE:
// Nothing to do.
break;
case kCondLT:
false_high_cond = kCondGT;
break;
case kCondLE:
true_high_cond = kCondLT;
break;
case kCondGT:
false_high_cond = kCondLT;
break;
case kCondGE:
true_high_cond = kCondGT;
break;
case kCondB:
false_high_cond = kCondA;
break;
case kCondBE:
true_high_cond = kCondB;
break;
case kCondA:
false_high_cond = kCondB;
break;
case kCondAE:
true_high_cond = kCondA;
break;
}
if (right.IsConstant()) {
int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
int32_t val_low = Low32Bits(value);
int32_t val_high = High32Bits(value);
__ CmpConstant(left_high, val_high);
if (if_cond == kCondNE) {
__ b(true_label, ARMCondition(true_high_cond));
} else if (if_cond == kCondEQ) {
__ b(false_label, ARMCondition(false_high_cond));
} else {
__ b(true_label, ARMCondition(true_high_cond));
__ b(false_label, ARMCondition(false_high_cond));
}
// The high words must be equal, so compare the low words.
__ CmpConstant(left_low, val_low);
} else {
Register right_high = right.AsRegisterPairHigh<Register>();
Register right_low = right.AsRegisterPairLow<Register>();
__ cmp(left_high, ShifterOperand(right_high));
if (if_cond == kCondNE) {
__ b(true_label, ARMCondition(true_high_cond));
} else if (if_cond == kCondEQ) {
__ b(false_label, ARMCondition(false_high_cond));
} else {
__ b(true_label, ARMCondition(true_high_cond));
__ b(false_label, ARMCondition(false_high_cond));
}
// The high words must be equal, so compare the low words.
__ cmp(left_low, ShifterOperand(right_low));
}
// The last comparison might be unsigned.
// TODO: optimize cases where this is always true/false
__ b(true_label, final_condition);
}
void InstructionCodeGeneratorARM::GenerateCompareTestAndBranch(HCondition* condition,
Label* true_target_in,
Label* false_target_in) {
// Generated branching requires both targets to be explicit. If either of the
// targets is nullptr (fallthrough) use and bind `fallthrough_target` instead.
Label fallthrough_target;
Label* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in;
Label* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in;
LocationSummary* locations = condition->GetLocations();
Location left = locations->InAt(0);
Location right = locations->InAt(1);
Primitive::Type type = condition->InputAt(0)->GetType();
switch (type) {
case Primitive::kPrimLong:
GenerateLongComparesAndJumps(condition, true_target, false_target);
break;
case Primitive::kPrimFloat:
__ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
GenerateFPJumps(condition, true_target, false_target);
break;
case Primitive::kPrimDouble:
__ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
GenerateFPJumps(condition, true_target, false_target);
break;
default:
LOG(FATAL) << "Unexpected compare type " << type;
}
if (false_target != &fallthrough_target) {
__ b(false_target);
}
if (fallthrough_target.IsLinked()) {
__ Bind(&fallthrough_target);
}
}
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
Label* true_target,
Label* false_target) {
HInstruction* cond = instruction->InputAt(condition_input_index);
if (true_target == nullptr && false_target == nullptr) {
// Nothing to do. The code always falls through.
return;
} else if (cond->IsIntConstant()) {
// Constant condition, statically compared against 1.
if (cond->AsIntConstant()->IsOne()) {
if (true_target != nullptr) {
__ b(true_target);
}
} else {
DCHECK(cond->AsIntConstant()->IsZero());
if (false_target != nullptr) {
__ b(false_target);
}
}
return;
}
// The following code generates these patterns:
// (1) true_target == nullptr && false_target != nullptr
// - opposite condition true => branch to false_target
// (2) true_target != nullptr && false_target == nullptr
// - condition true => branch to true_target
// (3) true_target != nullptr && false_target != nullptr
// - condition true => branch to true_target
// - branch to false_target
if (IsBooleanValueOrMaterializedCondition(cond)) {
// Condition has been materialized, compare the output to 0.
Location cond_val = instruction->GetLocations()->InAt(condition_input_index);
DCHECK(cond_val.IsRegister());
if (true_target == nullptr) {
__ CompareAndBranchIfZero(cond_val.AsRegister<Register>(), false_target);
} else {
__ CompareAndBranchIfNonZero(cond_val.AsRegister<Register>(), true_target);
}
} else {
// Condition has not been materialized. Use its inputs as the comparison and
// its condition as the branch condition.
HCondition* condition = cond->AsCondition();
// If this is a long or FP comparison that has been folded into
// the HCondition, generate the comparison directly.
Primitive::Type type = condition->InputAt(0)->GetType();
if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
GenerateCompareTestAndBranch(condition, true_target, false_target);
return;
}
LocationSummary* locations = cond->GetLocations();
DCHECK(locations->InAt(0).IsRegister());
Register left = locations->InAt(0).AsRegister<Register>();
Location right = locations->InAt(1);
if (right.IsRegister()) {
__ cmp(left, ShifterOperand(right.AsRegister<Register>()));
} else {
DCHECK(right.IsConstant());
__ CmpConstant(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
if (true_target == nullptr) {
__ b(false_target, ARMCondition(condition->GetOppositeCondition()));
} else {
__ b(true_target, ARMCondition(condition->GetCondition()));
}
}
// If neither branch falls through (case 3), the conditional branch to `true_target`
// was already emitted above (as in case 2) and we still need an unconditional jump
// to `false_target`.
if (true_target != nullptr && false_target != nullptr) {
__ b(false_target);
}
}
void LocationsBuilderARM::VisitIf(HIf* if_instr) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
}
void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
Label* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
nullptr : codegen_->GetLabelOf(true_successor);
Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
}
void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
}
void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
/* false_target */ nullptr);
}
void LocationsBuilderARM::VisitSelect(HSelect* select) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
if (Primitive::IsFloatingPointType(select->GetType())) {
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
} else {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
}
if (IsBooleanValueOrMaterializedCondition(select->GetCondition())) {
locations->SetInAt(2, Location::RequiresRegister());
}
locations->SetOut(Location::SameAsFirstInput());
}
void InstructionCodeGeneratorARM::VisitSelect(HSelect* select) {
LocationSummary* locations = select->GetLocations();
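// The output register is the same as the first input, so the false case needs
// no move: branch over the move when the condition does not hold, otherwise
// fall through and copy the second input into the output.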
Label false_target;
GenerateTestAndBranch(select,
/* condition_input_index */ 2,
/* true_target */ nullptr,
&false_target);
codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
__ Bind(&false_target);
}
void LocationsBuilderARM::VisitNativeDebugInfo(HNativeDebugInfo* info) {
new (GetGraph()->GetArena()) LocationSummary(info);
}
void InstructionCodeGeneratorARM::VisitNativeDebugInfo(HNativeDebugInfo* info) {
codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
}
void CodeGeneratorARM::GenerateNop() {
__ nop();
}
void LocationsBuilderARM::HandleCondition(HCondition* cond) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
// Handle the long/FP comparisons made in instruction simplification.
switch (cond->InputAt(0)->GetType()) {
case Primitive::kPrimLong:
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
if (!cond->IsEmittedAtUseSite()) {
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
if (!cond->IsEmittedAtUseSite()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
break;
default:
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
if (!cond->IsEmittedAtUseSite()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
}
}
void InstructionCodeGeneratorARM::HandleCondition(HCondition* cond) {
if (cond->IsEmittedAtUseSite()) {
return;
}
LocationSummary* locations = cond->GetLocations();
Location left = locations->InAt(0);
Location right = locations->InAt(1);
Register out = locations->Out().AsRegister<Register>();
Label true_label, false_label;
switch (cond->InputAt(0)->GetType()) {
default: {
// Integer case.
if (right.IsRegister()) {
__ cmp(left.AsRegister<Register>(), ShifterOperand(right.AsRegister<Register>()));
} else {
DCHECK(right.IsConstant());
__ CmpConstant(left.AsRegister<Register>(),
CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
__ it(ARMCondition(cond->GetCondition()), kItElse);
__ mov(out, ShifterOperand(1), ARMCondition(cond->GetCondition()));
__ mov(out, ShifterOperand(0), ARMCondition(cond->GetOppositeCondition()));
return;
}
case Primitive::kPrimLong:
GenerateLongComparesAndJumps(cond, &true_label, &false_label);
break;
case Primitive::kPrimFloat:
__ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
GenerateFPJumps(cond, &true_label, &false_label);
break;
case Primitive::kPrimDouble:
__ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
GenerateFPJumps(cond, &true_label, &false_label);
break;
}
// Convert the jumps into the result.
Label done_label;
// False case: result = 0.
__ Bind(&false_label);
__ LoadImmediate(out, 0);
__ b(&done_label);
// True case: result = 1.
__ Bind(&true_label);
__ LoadImmediate(out, 1);
__ Bind(&done_label);
}
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
HandleCondition(comp);
}
void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
HandleCondition(comp);
}
void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
HandleCondition(comp);
}
void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
HandleCondition(comp);
}
void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
HandleCondition(comp);
}
void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
HandleCondition(comp);
}
void LocationsBuilderARM::VisitBelow(HBelow* comp) {
HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitBelow(HBelow* comp) {
HandleCondition(comp);
}
void LocationsBuilderARM::VisitBelowOrEqual(HBelowOrEqual* comp) {
HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitBelowOrEqual(HBelowOrEqual* comp) {
HandleCondition(comp);
}
void LocationsBuilderARM::VisitAbove(HAbove* comp) {
HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitAbove(HAbove* comp) {
HandleCondition(comp);
}
void LocationsBuilderARM::VisitAboveOrEqual(HAboveOrEqual* comp) {
HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitAboveOrEqual(HAboveOrEqual* comp) {
HandleCondition(comp);
}
void LocationsBuilderARM::VisitLocal(HLocal* local) {
local->SetLocations(nullptr);
}
void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}
void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
load->SetLocations(nullptr);
}
void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
}
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
switch (store->InputAt(1)->GetType()) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
case Primitive::kPrimChar:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimFloat:
locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
break;
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
break;
default:
LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
}
}
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
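// Nothing to do, this is driven by the code generator.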
}
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
}
void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
}
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
}
void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
}
void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
}
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
memory_barrier->SetLocations(nullptr);
}
void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
codegen_->GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
void LocationsBuilderARM::VisitReturn(HReturn* ret) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
void LocationsBuilderARM::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
// The trampoline uses the same calling convention as the dex calling
// convention, except that arg0/r0 holds the method_idx instead of the
// target Method*.
HandleInvoke(invoke);
}
void InstructionCodeGeneratorARM::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
}
void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
// Explicit clinit checks triggered by static invokes must have been pruned by
// art::PrepareForRegisterAllocation.
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
codegen_->GetAssembler(),
codegen_->GetInstructionSetFeatures());
if (intrinsic.TryDispatch(invoke)) {
if (invoke->GetLocations()->CanCall() && invoke->HasPcRelativeDexCache()) {
invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::Any());
}
return;
}
HandleInvoke(invoke);
// For PC-relative dex cache the invoke has an extra input, the PC-relative address base.
if (invoke->HasPcRelativeDexCache()) {
invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
}
}
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
if (invoke->GetLocations()->Intrinsified()) {
IntrinsicCodeGeneratorARM intrinsic(codegen);
intrinsic.Dispatch(invoke);
return true;
}
return false;
}
void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
// Explicit clinit checks triggered by static invokes must have been pruned by
// art::PrepareForRegisterAllocation.
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
}
LocationSummary* locations = invoke->GetLocations();
codegen_->GenerateStaticOrDirectCall(
invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
InvokeDexCallingConventionVisitorARM calling_convention_visitor;
CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
}
void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
codegen_->GetAssembler(),
codegen_->GetInstructionSetFeatures());
if (intrinsic.TryDispatch(invoke)) {
return;
}
HandleInvoke(invoke);
}
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
}
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
HandleInvoke(invoke);
// Add the hidden argument.
invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
}
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
LocationSummary* locations = invoke->GetLocations();
Register temp = locations->GetTemp(0).AsRegister<Register>();
Register hidden_reg = locations->GetTemp(1).AsRegister<Register>();
uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
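// Note: several interface methods may map to the same IMT slot (the embedded
// table has kImtSize buckets); on a conflict, the stub stored in the slot uses
// the hidden argument (the method index in R12, set below) to find the target.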
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
// Set the hidden argument. It is safe to do this here, as R12
// won't be modified thereafter, before the `blx` (call) instruction.
DCHECK_EQ(R12, hidden_reg);
__ LoadImmediate(hidden_reg, invoke->GetDexMethodIndex());
if (receiver.IsStackSlot()) {
__ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
// /* HeapReference<Class> */ temp = temp->klass_
__ LoadFromOffset(kLoadWord, temp, temp, class_offset);
} else {
// /* HeapReference<Class> */ temp = receiver->klass_
__ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
// Instead of simply (possibly) unpoisoning `temp` here, we should
// emit a read barrier for the previous class reference load.
// However, this is not required in practice, as this is an
// intermediate/temporary reference and because the current
// concurrent copying collector keeps the from-space memory
// intact/accessible until the end of the marking phase (this
// guarantee may not hold for future collectors).
__ MaybeUnpoisonHeapReference(temp);
// temp = temp->GetImtEntryAt(method_offset);
uint32_t entry_point =
ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
// LR();
__ blx(LR);
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderARM::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case Primitive::kPrimInt: {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
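// The output interferes with the input: the generated code (see VisitNeg)
// writes out.hi via SBC before in.hi is read, so the register allocator
// must not let them share a register pair.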
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
break;
}
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
default:
LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
}
}
void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
LocationSummary* locations = neg->GetLocations();
Location out = locations->Out();
Location in = locations->InAt(0);
switch (neg->GetResultType()) {
case Primitive::kPrimInt:
DCHECK(in.IsRegister());
__ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
break;
case Primitive::kPrimLong:
DCHECK(in.IsRegisterPair());
// out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
__ rsbs(out.AsRegisterPairLow<Register>(),
in.AsRegisterPairLow<Register>(),
ShifterOperand(0));
// We cannot emit an RSC (Reverse Subtract with Carry)
// instruction here, as it does not exist in the Thumb-2
// instruction set. Instead, we use the following
// approach based on SBC and SUB.
//
// out.hi = -C
__ sbc(out.AsRegisterPairHigh<Register>(),
out.AsRegisterPairHigh<Register>(),
ShifterOperand(out.AsRegisterPairHigh<Register>()));
// out.hi = out.hi - in.hi
__ sub(out.AsRegisterPairHigh<Register>(),
out.AsRegisterPairHigh<Register>(),
ShifterOperand(in.AsRegisterPairHigh<Register>()));
break;
case Primitive::kPrimFloat:
DCHECK(in.IsFpuRegister());
__ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
break;
case Primitive::kPrimDouble:
DCHECK(in.IsFpuRegisterPair());
__ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
break;
default:
LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
}
}
void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
DCHECK_NE(result_type, input_type);
// The float-to-long, double-to-long, and long-to-float type conversions
// rely on a call to the runtime.
LocationSummary::CallKind call_kind =
(((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
&& result_type == Primitive::kPrimLong)
|| (input_type == Primitive::kPrimLong && result_type == Primitive::kPrimFloat))
? LocationSummary::kCall
: LocationSummary::kNoCall;
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
// The Java language does not allow treating boolean as an integral type, but
// our bit representation makes it safe.
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
case Primitive::kPrimLong:
// Type conversion from long to byte is a result of code transformations.
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-byte' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
break;
case Primitive::kPrimShort:
switch (input_type) {
case Primitive::kPrimLong:
// Type conversion from long to short is a result of code transformations.
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-short' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
break;
case Primitive::kPrimInt:
switch (input_type) {
case Primitive::kPrimLong:
// Processing a Dex `long-to-int' instruction.
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
case Primitive::kPrimFloat:
// Processing a Dex `float-to-int' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
break;
case Primitive::kPrimDouble:
// Processing a Dex `double-to-int' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
break;
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-long' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
case Primitive::kPrimFloat: {
// Processing a Dex `float-to-long' instruction.
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(
calling_convention.GetFpuRegisterAt(0)));
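// The runtime entrypoint returns the 64-bit result in the R0/R1 pair.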
locations->SetOut(Location::RegisterPairLocation(R0, R1));
break;
}
case Primitive::kPrimDouble: {
// Processing a Dex `double-to-long' instruction.
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterPairLocation(
calling_convention.GetFpuRegisterAt(0),
calling_convention.GetFpuRegisterAt(1)));
locations->SetOut(Location::RegisterPairLocation(R0, R1));
break;
}
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
break;
case Primitive::kPrimChar:
switch (input_type) {
case Primitive::kPrimLong:
// Type conversion from long to char is a result of code transformations.
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
// Processing a Dex `int-to-char' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
break;
case Primitive::kPrimFloat:
switch (input_type) {
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-float' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
break;
case Primitive::kPrimLong: {
// Processing a Dex `long-to-float' instruction.
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterPairLocation(
calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
locations->SetOut(Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
break;
}
case Primitive::kPrimDouble:
// Processing a Dex `double-to-float' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
break;
case Primitive::kPrimDouble:
switch (input_type) {
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-double' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
break;
case Primitive::kPrimLong:
// Processing a Dex `long-to-double' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
break;
case Primitive::kPrimFloat:
// Processing a Dex `float-to-double' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
}
void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
LocationSummary* locations = conversion->GetLocations();
Location out = locations->Out();
Location in = locations->InAt(0);
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
DCHECK_NE(result_type, input_type);
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
case Primitive::kPrimLong:
// Type conversion from long to byte is a result of code transformations.
__ sbfx(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>(), 0, 8);
break;
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-byte' instruction.
__ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
break;
case Primitive::kPrimShort:
switch (input_type) {
case Primitive::kPrimLong:
// Type conversion from long to short is a result of code transformations.
__ sbfx(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>(), 0, 16);
break;
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-short' instruction.
__ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
break;
case Primitive::kPrimInt:
switch (input_type) {
case Primitive::kPrimLong:
// Processing a Dex `long-to-int' instruction.
DCHECK(out.IsRegister());
if (in.IsRegisterPair()) {
__ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
} else if (in.IsDoubleStackSlot()) {
__ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
} else {
DCHECK(in.IsConstant());
DCHECK(in.GetConstant()->IsLongConstant());
int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
__ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
}
break;
case Primitive::kPrimFloat: {
// Processing a Dex `float-to-int' instruction.
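// vcvtis only operates on FPU registers, so convert in an FPU temporary
// (copying the input first so it is not clobbered in place), then move
// the 32-bit result to the core register.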
SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
__ vmovs(temp, in.AsFpuRegister<SRegister>());
__ vcvtis(temp, temp);
__ vmovrs(out.AsRegister<Register>(), temp);
break;
}
case Primitive::kPrimDouble: {
// Processing a Dex `double-to-int' instruction.
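// Same scheme as float-to-int: convert in an FPU temporary, then
// move the result to the core register.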
SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
DRegister temp_d = FromLowSToD(temp_s);
__ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
__ vcvtid(temp_s, temp_d);
__ vmovrs(out.AsRegister<Register>(), temp_s);
break;
}
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
break;
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-long' instruction.
DCHECK(out.IsRegisterPair());
DCHECK(in.IsRegister());
__ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
// Sign extension: out.hi = out.lo >> 31 (arithmetic shift).
__ Asr(out.AsRegisterPairHigh<Register>(),
out.AsRegisterPairLow<Register>(),
31);
break;
case Primitive::kPrimFloat:
// Processing a Dex `float-to-long' instruction.
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
conversion,
conversion->GetDexPc(),
nullptr);
CheckEntrypointTypes<kQuickF2l, int64_t, float>();
break;
case Primitive::kPrimDouble:
// Processing a Dex `double-to-long' instruction.
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
conversion,
conversion->GetDexPc(),
nullptr);
CheckEntrypointTypes<kQuickD2l, int64_t, double>();
break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
break;
case Primitive::kPrimChar:
switch (input_type) {
case Primitive::kPrimLong:
// Type conversion from long to char is a result of code transformations.
__ ubfx(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>(), 0, 16);
break;
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
// Processing a Dex `int-to-char' instruction.
__ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
break;
case Primitive::kPrimFloat:
switch (input_type) {
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar: {
// Processing a Dex `int-to-float' instruction.
__ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
__ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
break;
}
case Primitive::kPrimLong:
// Processing a Dex `long-to-float' instruction.
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pL2f),
conversion,
conversion->GetDexPc(),
nullptr);
CheckEntrypointTypes<kQuickL2f, float, int64_t>();
break;
case Primitive::kPrimDouble:
// Processing a Dex `double-to-float' instruction.
__ vcvtsd(out.AsFpuRegister<SRegister>(),
FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
break;
case Primitive::kPrimDouble:
switch (input_type) {
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar: {
// Processing a Dex `int-to-double' instruction.
__ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
__ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
out.AsFpuRegisterPairLow<SRegister>());
break;
}
case Primitive::kPrimLong: {
// Processing a Dex `long-to-double' instruction.
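// There is no 64-bit-integer-to-double instruction, so compute
// double(in) = double(high) * 2^32 + double(unsigned(low)): both
// partial conversions are exact, and VMLA performs the scaled add.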
Register low = in.AsRegisterPairLow<Register>();
Register high = in.AsRegisterPairHigh<Register>();
SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
DRegister out_d = FromLowSToD(out_s);
SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
DRegister temp_d = FromLowSToD(temp_s);
SRegister constant_s = locations->GetTemp(1).AsFpuRegisterPairLow<SRegister>();
DRegister constant_d = FromLowSToD(constant_s);
// temp_d = int-to-double(high)
__ vmovsr(temp_s, high);
__ vcvtdi(temp_d, temp_s);
// constant_d = k2Pow32EncodingForDouble
__ LoadDImmediate(constant_d, bit_cast<double, int64_t>(k2Pow32EncodingForDouble));
// out_d = unsigned-to-double(low)
__ vmovsr(out_s, low);
__ vcvtdu(out_d, out_s);
// out_d += temp_d * constant_d
__ vmlad(out_d, temp_d, constant_d);
break;
}
case Primitive::kPrimFloat:
// Processing a Dex `float-to-double' instruction.
__ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
in.AsFpuRegister<SRegister>());
break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
<< " to " << result_type;
}
}