AArch64: Add inlining support for ceil(), floor(), rint(), round()
This patch adds inlining support for the following Math and StrictMath
methods in the ARM64 backend:
* double ceil(double)
* double floor(double)
* double rint(double)
* long round(double)
* int round(float)
Also some cleanup: add missing OVERRIDE annotations to the virtual
method declarations in codegen_arm64.h, and fix the kA64StpPre4ffXD
disassembly format string (first operand is an FP register, so "!0f"
rather than "!0r").
Change-Id: I9f5a2f4065b1313649f4b0c4380b8176703c3fe1
Signed-off-by: Serban Constantinescu <serban.constantinescu@arm.com>
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 90cb156..a449cbd 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -267,6 +267,8 @@
kA64Fcvtzs2xf, // fcvtzs [100111100s111000000000] rn[9-5] rd[4-0].
kA64Fcvt2Ss, // fcvt [0001111000100010110000] rn[9-5] rd[4-0].
kA64Fcvt2sS, // fcvt [0001111001100010010000] rn[9-5] rd[4-0].
+ kA64Fcvtms2ws, // fcvtms [0001111000110000000000] rn[9-5] rd[4-0].
+ kA64Fcvtms2xS, // fcvtms [1001111001110000000000] rn[9-5] rd[4-0].
kA64Fdiv3fff, // fdiv[000111100s1] rm[20-16] [000110] rn[9-5] rd[4-0].
kA64Fmax3fff, // fmax[000111100s1] rm[20-16] [010010] rn[9-5] rd[4-0].
kA64Fmin3fff, // fmin[000111100s1] rm[20-16] [010110] rn[9-5] rd[4-0].
@@ -278,6 +280,9 @@
kA64Fmov2xS, // fmov[1001111001101111000000] rn[9-5] rd[4-0].
kA64Fmul3fff, // fmul[000111100s1] rm[20-16] [000010] rn[9-5] rd[4-0].
kA64Fneg2ff, // fneg[000111100s100001010000] rn[9-5] rd[4-0].
+ kA64Frintp2ff, // frintp [000111100s100100110000] rn[9-5] rd[4-0].
+ kA64Frintm2ff, // frintm [000111100s100101010000] rn[9-5] rd[4-0].
+ kA64Frintn2ff, // frintn [000111100s100100010000] rn[9-5] rd[4-0].
kA64Frintz2ff, // frintz [000111100s100101110000] rn[9-5] rd[4-0].
kA64Fsqrt2ff, // fsqrt[000111100s100001110000] rn[9-5] rd[4-0].
kA64Fsub3fff, // fsub[000111100s1] rm[20-16] [001110] rn[9-5] rd[4-0].
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index c46be53..15c89f2 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -260,6 +260,14 @@
kFmtRegS, 4, 0, kFmtRegD, 9, 5, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
"fcvt", "!0s, !1S", kFixupNone),
+ ENCODING_MAP(kA64Fcvtms2ws, NO_VARIANTS(0x1e300000),
+ kFmtRegW, 4, 0, kFmtRegS, 9, 5, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "fcvtms", "!0w, !1s", kFixupNone),
+ ENCODING_MAP(kA64Fcvtms2xS, NO_VARIANTS(0x9e700000),
+ kFmtRegX, 4, 0, kFmtRegD, 9, 5, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "fcvtms", "!0x, !1S", kFixupNone),
ENCODING_MAP(FWIDE(kA64Fdiv3fff), FLOAT_VARIANTS(0x1e201800),
kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
@@ -304,6 +312,18 @@
kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
"fneg", "!0f, !1f", kFixupNone),
+ ENCODING_MAP(FWIDE(kA64Frintp2ff), FLOAT_VARIANTS(0x1e24c000),
+ kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "frintp", "!0f, !1f", kFixupNone),
+ ENCODING_MAP(FWIDE(kA64Frintm2ff), FLOAT_VARIANTS(0x1e254000),
+ kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "frintm", "!0f, !1f", kFixupNone),
+ ENCODING_MAP(FWIDE(kA64Frintn2ff), FLOAT_VARIANTS(0x1e244000),
+ kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "frintn", "!0f, !1f", kFixupNone),
ENCODING_MAP(FWIDE(kA64Frintz2ff), FLOAT_VARIANTS(0x1e25c000),
kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
@@ -521,7 +541,7 @@
ENCODING_MAP(WIDE(kA64StpPre4ffXD), CUSTOM_VARIANTS(0x2d800000, 0x6d800000),
kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
- "stp", "!0r, !1f, [!2X, #!3D]!!", kFixupNone),
+ "stp", "!0f, !1f, [!2X, #!3D]!!", kFixupNone),
ENCODING_MAP(WIDE(kA64StpPre4rrXD), CUSTOM_VARIANTS(0x29800000, 0xa9800000),
kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 18f2a29..2c587b8 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -59,332 +59,348 @@
bool initialized_;
};
- public:
- Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
+ public:
+ Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
- // Required for target - codegen helpers.
- bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
- RegLocation rl_dest, int lit) OVERRIDE;
- bool SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
- RegLocation rl_dest, int64_t lit);
- bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
- bool HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int64_t lit);
- bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
- LIR* CheckSuspendUsingLoad() OVERRIDE;
- RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
- LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
- LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- VolatileKind is_volatile)
- OVERRIDE;
- LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
- OpSize size) OVERRIDE;
- LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale)
- OVERRIDE;
- LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
- LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
- LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
- LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
- VolatileKind is_volatile) OVERRIDE;
- LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
- OpSize size) OVERRIDE;
- LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale)
- OVERRIDE;
- void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
- LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
- int offset, int check_value, LIR* target, LIR** compare) OVERRIDE;
+ // Required for target - codegen helpers.
+ bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
+ RegLocation rl_dest, int lit) OVERRIDE;
+ bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
+ RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ bool HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
+ RegLocation rl_src, RegLocation rl_dest, int64_t lit);
+ bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ LIR* CheckSuspendUsingLoad() OVERRIDE;
+ RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
+ LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size, VolatileKind is_volatile) OVERRIDE;
+ LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ VolatileKind is_volatile) OVERRIDE;
+ LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
+ OpSize size) OVERRIDE;
+ LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale)
+ OVERRIDE;
+ LIR* LoadConstantNoClobber(RegStorage r_dest, int value) OVERRIDE;
+ LIR* LoadConstantWide(RegStorage r_dest, int64_t value) OVERRIDE;
+ LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
+ VolatileKind is_volatile) OVERRIDE;
+ LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src, VolatileKind is_volatile)
+ OVERRIDE;
+ LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
+ OpSize size) OVERRIDE;
+ LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale) OVERRIDE;
+ void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
+ LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
+ int offset, int check_value, LIR* target, LIR** compare) OVERRIDE;
- // Required for target - register utilities.
- RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
- RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
- if (wide_kind == kWide || wide_kind == kRef) {
- return As64BitReg(TargetReg(symbolic_reg));
- } else {
- return Check32BitReg(TargetReg(symbolic_reg));
- }
- }
- RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
+ // Required for target - register utilities.
+ RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
+ RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
+ if (wide_kind == kWide || wide_kind == kRef) {
return As64BitReg(TargetReg(symbolic_reg));
+ } else {
+ return Check32BitReg(TargetReg(symbolic_reg));
}
- RegStorage GetArgMappingToPhysicalReg(int arg_num);
- RegLocation GetReturnAlt();
- RegLocation GetReturnWideAlt();
- RegLocation LocCReturn();
- RegLocation LocCReturnRef();
- RegLocation LocCReturnDouble();
- RegLocation LocCReturnFloat();
- RegLocation LocCReturnWide();
- ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
- void AdjustSpillMask();
- void ClobberCallerSave();
- void FreeCallTemps();
- void LockCallTemps();
- void CompilerInitializeRegAlloc();
+ }
+ RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
+ return As64BitReg(TargetReg(symbolic_reg));
+ }
+ RegStorage GetArgMappingToPhysicalReg(int arg_num) OVERRIDE;
+ RegLocation GetReturnAlt() OVERRIDE;
+ RegLocation GetReturnWideAlt() OVERRIDE;
+ RegLocation LocCReturn() OVERRIDE;
+ RegLocation LocCReturnRef() OVERRIDE;
+ RegLocation LocCReturnDouble() OVERRIDE;
+ RegLocation LocCReturnFloat() OVERRIDE;
+ RegLocation LocCReturnWide() OVERRIDE;
+ ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
+ void AdjustSpillMask() OVERRIDE;
+ void ClobberCallerSave() OVERRIDE;
+ void FreeCallTemps() OVERRIDE;
+ void LockCallTemps() OVERRIDE;
+ void CompilerInitializeRegAlloc() OVERRIDE;
- // Required for target - miscellaneous.
- void AssembleLIR();
- uint32_t LinkFixupInsns(LIR* head_lir, LIR* tail_lir, CodeOffset offset);
- int AssignInsnOffsets();
- void AssignOffsets();
- uint8_t* EncodeLIRs(uint8_t* write_pos, LIR* lir);
- void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
- void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
- ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
- const char* GetTargetInstFmt(int opcode);
- const char* GetTargetInstName(int opcode);
- std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
- ResourceMask GetPCUseDefEncoding() const OVERRIDE;
- uint64_t GetTargetInstFlags(int opcode);
- size_t GetInsnSize(LIR* lir) OVERRIDE;
- bool IsUnconditionalBranch(LIR* lir);
+ // Required for target - miscellaneous.
+ void AssembleLIR() OVERRIDE;
+ void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+ void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+ ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
+ const char* GetTargetInstFmt(int opcode) OVERRIDE;
+ const char* GetTargetInstName(int opcode) OVERRIDE;
+ std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) OVERRIDE;
+ ResourceMask GetPCUseDefEncoding() const OVERRIDE;
+ uint64_t GetTargetInstFlags(int opcode) OVERRIDE;
+ size_t GetInsnSize(LIR* lir) OVERRIDE;
+ bool IsUnconditionalBranch(LIR* lir) OVERRIDE;
- // Get the register class for load/store of a field.
- RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
+ // Get the register class for load/store of a field.
+ RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
- // Required for target - Dalvik-level generators.
- void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation lr_shift);
- void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
- void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_dest, int scale);
- void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
- RegLocation rl_src, int scale, bool card_mark);
- void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift);
- void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
- bool GenInlinedReverseBits(CallInfo* info, OpSize size);
- bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
- bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
- bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
- bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
- bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double);
- bool GenInlinedSqrt(CallInfo* info);
- bool GenInlinedPeek(CallInfo* info, OpSize size);
- bool GenInlinedPoke(CallInfo* info, OpSize size);
- bool GenInlinedAbsLong(CallInfo* info);
- void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
- void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
- void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div);
- RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
- RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
- void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenDivZeroCheckWide(RegStorage reg);
- void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
- void GenExitSequence();
- void GenSpecialExitSequence();
- void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
- void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
- void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
- void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
- void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
- int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
- // Helper used in the above two.
- void GenSelect(int32_t left, int32_t right, ConditionCode code, RegStorage rs_dest,
- int result_reg_class);
+ // Required for target - Dalvik-level generators.
+ void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation lr_shift) OVERRIDE;
+ void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_dest, int scale) OVERRIDE;
+ void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_src, int scale, bool card_mark) OVERRIDE;
+ void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_shift) OVERRIDE;
+ void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ bool GenInlinedReverseBits(CallInfo* info, OpSize size) OVERRIDE;
+ bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
+ bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
+ bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) OVERRIDE;
+ bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) OVERRIDE;
+ bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) OVERRIDE;
+ bool GenInlinedSqrt(CallInfo* info) OVERRIDE;
+ bool GenInlinedCeil(CallInfo* info) OVERRIDE;
+ bool GenInlinedFloor(CallInfo* info) OVERRIDE;
+ bool GenInlinedRint(CallInfo* info) OVERRIDE;
+ bool GenInlinedRound(CallInfo* info, bool is_double) OVERRIDE;
+ bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
+ bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
+ bool GenInlinedAbsLong(CallInfo* info) OVERRIDE;
+ void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenNotLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenNegLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div) OVERRIDE;
+ RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div)
+ OVERRIDE;
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div)
+ OVERRIDE;
+ void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) OVERRIDE;
+ void GenDivZeroCheckWide(RegStorage reg) OVERRIDE;
+ void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
+ void GenExitSequence() OVERRIDE;
+ void GenSpecialExitSequence() OVERRIDE;
+ void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+ void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
+ void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
+ void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
+ void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ int dest_reg_class) OVERRIDE;
- bool GenMemBarrier(MemBarrierKind barrier_kind);
- void GenMonitorEnter(int opt_flags, RegLocation rl_src);
- void GenMonitorExit(int opt_flags, RegLocation rl_src);
- void GenMoveException(RegLocation rl_dest);
- void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
- int first_bit, int second_bit);
- void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
- void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
- void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+ bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
+ void GenMonitorEnter(int opt_flags, RegLocation rl_src) OVERRIDE;
+ void GenMonitorExit(int opt_flags, RegLocation rl_src) OVERRIDE;
+ void GenMoveException(RegLocation rl_dest) OVERRIDE;
+ void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+ int first_bit, int second_bit) OVERRIDE;
+ void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+ void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
- // Spill core and FP registers. Returns the SP difference: either spill size, or whole
- // frame size.
- int SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
+ // Required for target - single operation generators.
+ LIR* OpUnconditionalBranch(LIR* target) OVERRIDE;
+ LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) OVERRIDE;
+ LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) OVERRIDE;
+ LIR* OpCondBranch(ConditionCode cc, LIR* target) OVERRIDE;
+ LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) OVERRIDE;
+ LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+ LIR* OpIT(ConditionCode cond, const char* guide) OVERRIDE;
+ void OpEndIT(LIR* it) OVERRIDE;
+ LIR* OpMem(OpKind op, RegStorage r_base, int disp) OVERRIDE;
+ LIR* OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
+ LIR* OpReg(OpKind op, RegStorage r_dest_src) OVERRIDE;
+ void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+ LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+ LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) OVERRIDE;
+ LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) OVERRIDE;
+ LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) OVERRIDE;
+ LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) OVERRIDE;
+ LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) OVERRIDE;
+ LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) OVERRIDE;
+ LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) OVERRIDE;
+ LIR* OpTestSuspend(LIR* target) OVERRIDE;
+ LIR* OpVldm(RegStorage r_base, int count) OVERRIDE;
+ LIR* OpVstm(RegStorage r_base, int count) OVERRIDE;
+ void OpRegCopyWide(RegStorage dest, RegStorage src) OVERRIDE;
- // Unspill core and FP registers.
- void UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
+ bool InexpensiveConstantInt(int32_t value) OVERRIDE;
+ bool InexpensiveConstantFloat(int32_t value) OVERRIDE;
+ bool InexpensiveConstantLong(int64_t value) OVERRIDE;
+ bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
- // Required for target - single operation generators.
- LIR* OpUnconditionalBranch(LIR* target);
- LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
- LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
- LIR* OpCondBranch(ConditionCode cc, LIR* target);
- LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
- LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
- LIR* OpIT(ConditionCode cond, const char* guide);
- void OpEndIT(LIR* it);
- LIR* OpMem(OpKind op, RegStorage r_base, int disp);
- LIR* OpPcRelLoad(RegStorage reg, LIR* target);
- LIR* OpReg(OpKind op, RegStorage r_dest_src);
- void OpRegCopy(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value);
- LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
- LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
- LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
- LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
- LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
- LIR* OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value);
- LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
- LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
- LIR* OpTestSuspend(LIR* target);
- LIR* OpVldm(RegStorage r_base, int count);
- LIR* OpVstm(RegStorage r_base, int count);
- void OpRegCopyWide(RegStorage dest, RegStorage src);
+ void FlushIns(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
- LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
- LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
- LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
- int shift);
- LIR* OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
- A64RegExtEncodings ext, uint8_t amount);
- LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
- LIR* OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
- A64RegExtEncodings ext, uint8_t amount);
- static const ArmEncodingMap EncodingMap[kA64Last];
- int EncodeShift(int code, int amount);
- int EncodeExtend(int extend_type, int amount);
- bool IsExtendEncoding(int encoded_value);
- int EncodeLogicalImmediate(bool is_wide, uint64_t value);
- uint64_t DecodeLogicalImmediate(bool is_wide, int value);
-
- ArmConditionCode ArmConditionEncoding(ConditionCode code);
- bool InexpensiveConstantInt(int32_t value);
- bool InexpensiveConstantFloat(int32_t value);
- bool InexpensiveConstantLong(int64_t value);
- bool InexpensiveConstantDouble(int64_t value);
-
- void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
-
- int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
- NextCallInsn next_call_insn,
- const MethodReference& target_method,
- uint32_t vtable_idx,
- uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
- bool skip_this);
-
- int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
+ int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
NextCallInsn next_call_insn,
const MethodReference& target_method,
uint32_t vtable_idx,
uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
- bool skip_this);
- InToRegStorageMapping in_to_reg_storage_mapping_;
+ bool skip_this) OVERRIDE;
- bool WideGPRsAreAliases() OVERRIDE {
- return true; // 64b architecture.
- }
- bool WideFPRsAreAliases() OVERRIDE {
- return true; // 64b architecture.
- }
- size_t GetInstructionOffset(LIR* lir);
+ int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
+ NextCallInsn next_call_insn,
+ const MethodReference& target_method,
+ uint32_t vtable_idx,
+ uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+ bool skip_this) OVERRIDE;
- LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+ bool WideGPRsAreAliases() OVERRIDE {
+ return true; // 64b architecture.
+ }
+ bool WideFPRsAreAliases() OVERRIDE {
+ return true; // 64b architecture.
+ }
- private:
- /**
- * @brief Given register xNN (dNN), returns register wNN (sNN).
- * @param reg #RegStorage containing a Solo64 input register (e.g. @c x1 or @c d2).
- * @return A Solo32 with the same register number as the @p reg (e.g. @c w1 or @c s2).
- * @see As64BitReg
- */
- RegStorage As32BitReg(RegStorage reg) {
- DCHECK(!reg.IsPair());
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Expected 64b register";
- } else {
- LOG(WARNING) << "Expected 64b register";
- return reg;
- }
+ size_t GetInstructionOffset(LIR* lir) OVERRIDE;
+
+ LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+
+ private:
+ /**
+ * @brief Given register xNN (dNN), returns register wNN (sNN).
+ * @param reg #RegStorage containing a Solo64 input register (e.g. @c x1 or @c d2).
+ * @return A Solo32 with the same register number as the @p reg (e.g. @c w1 or @c s2).
+ * @see As64BitReg
+ */
+ RegStorage As32BitReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 64b register";
+ } else {
+ LOG(WARNING) << "Expected 64b register";
+ return reg;
}
- RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
- reg.GetRawBits() & RegStorage::kRegTypeMask);
- DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
- ->GetReg().GetReg(),
- ret_val.GetReg());
- return ret_val;
}
+ RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
+ reg.GetRawBits() & RegStorage::kRegTypeMask);
+ DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
+ ->GetReg().GetReg(),
+ ret_val.GetReg());
+ return ret_val;
+ }
- RegStorage Check32BitReg(RegStorage reg) {
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Checked for 32b register";
- } else {
- LOG(WARNING) << "Checked for 32b register";
- return As32BitReg(reg);
- }
+ RegStorage Check32BitReg(RegStorage reg) {
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Checked for 32b register";
+ } else {
+ LOG(WARNING) << "Checked for 32b register";
+ return As32BitReg(reg);
}
- return reg;
}
+ return reg;
+ }
- /**
- * @brief Given register wNN (sNN), returns register xNN (dNN).
- * @param reg #RegStorage containing a Solo32 input register (e.g. @c w1 or @c s2).
- * @return A Solo64 with the same register number as the @p reg (e.g. @c x1 or @c d2).
- * @see As32BitReg
- */
- RegStorage As64BitReg(RegStorage reg) {
- DCHECK(!reg.IsPair());
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Expected 32b register";
- } else {
- LOG(WARNING) << "Expected 32b register";
- return reg;
- }
+ /**
+ * @brief Given register wNN (sNN), returns register xNN (dNN).
+ * @param reg #RegStorage containing a Solo32 input register (e.g. @c w1 or @c s2).
+ * @return A Solo64 with the same register number as the @p reg (e.g. @c x1 or @c d2).
+ * @see As32BitReg
+ */
+ RegStorage As64BitReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 32b register";
+ } else {
+ LOG(WARNING) << "Expected 32b register";
+ return reg;
}
- RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
- reg.GetRawBits() & RegStorage::kRegTypeMask);
- DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
- ->GetReg().GetReg(),
- ret_val.GetReg());
- return ret_val;
}
+ RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
+ reg.GetRawBits() & RegStorage::kRegTypeMask);
+ DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
+ ->GetReg().GetReg(),
+ ret_val.GetReg());
+ return ret_val;
+ }
- RegStorage Check64BitReg(RegStorage reg) {
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Checked for 64b register";
- } else {
- LOG(WARNING) << "Checked for 64b register";
- return As64BitReg(reg);
- }
+ RegStorage Check64BitReg(RegStorage reg) {
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Checked for 64b register";
+ } else {
+ LOG(WARNING) << "Checked for 64b register";
+ return As64BitReg(reg);
}
- return reg;
}
+ return reg;
+ }
- LIR* LoadFPConstantValue(RegStorage r_dest, int32_t value);
- LIR* LoadFPConstantValueWide(RegStorage r_dest, int64_t value);
- void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
- void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
- void AssignDataOffsets();
- RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
- bool is_div, bool check_zero);
- RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
- size_t GetLoadStoreSize(LIR* lir);
+ int32_t EncodeImmSingle(uint32_t bits);
+ int32_t EncodeImmDouble(uint64_t bits);
+ LIR* LoadFPConstantValue(RegStorage r_dest, int32_t value);
+ LIR* LoadFPConstantValueWide(RegStorage r_dest, int64_t value);
+ void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
+ void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
+ void AssignDataOffsets();
+ RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ bool is_div, bool check_zero);
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
+ size_t GetLoadStoreSize(LIR* lir);
+
+ bool SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
+ RegLocation rl_dest, int64_t lit);
+
+ uint32_t LinkFixupInsns(LIR* head_lir, LIR* tail_lir, CodeOffset offset);
+ int AssignInsnOffsets();
+ void AssignOffsets();
+ uint8_t* EncodeLIRs(uint8_t* write_pos, LIR* lir);
+
+ // Spill core and FP registers. Returns the SP difference: either spill size, or whole
+ // frame size.
+ int SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
+
+ // Unspill core and FP registers.
+ void UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
+
+ void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+
+ LIR* OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value);
+ LIR* OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value);
+
+ LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
+ LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
+ int shift);
+ int EncodeShift(int code, int amount);
+
+ LIR* OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
+ A64RegExtEncodings ext, uint8_t amount);
+ LIR* OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
+ A64RegExtEncodings ext, uint8_t amount);
+ int EncodeExtend(int extend_type, int amount);
+ bool IsExtendEncoding(int encoded_value);
+
+ LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
+ LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+
+ int EncodeLogicalImmediate(bool is_wide, uint64_t value);
+ uint64_t DecodeLogicalImmediate(bool is_wide, int value);
+ ArmConditionCode ArmConditionEncoding(ConditionCode code);
+
+ // Helper used in the two GenSelect variants.
+ void GenSelect(int32_t left, int32_t right, ConditionCode code, RegStorage rs_dest,
+ int result_reg_class);
+
+ InToRegStorageMapping in_to_reg_storage_mapping_;
+ static const ArmEncodingMap EncodingMap[kA64Last];
};
} // namespace art
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index ed13c04..d0b2636 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -17,6 +17,7 @@
#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
+#include "utils.h"
namespace art {
@@ -386,6 +387,52 @@
return true;
}
+bool Arm64Mir2Lir::GenInlinedCeil(CallInfo* info) {
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = InlineTargetWide(info);
+ rl_src = LoadValueWide(rl_src, kFPReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR2(FWIDE(kA64Frintp2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+ return true;
+}
+
+bool Arm64Mir2Lir::GenInlinedFloor(CallInfo* info) {
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = InlineTargetWide(info);
+ rl_src = LoadValueWide(rl_src, kFPReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR2(FWIDE(kA64Frintm2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+ return true;
+}
+
+bool Arm64Mir2Lir::GenInlinedRint(CallInfo* info) {
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = InlineTargetWide(info);
+ rl_src = LoadValueWide(rl_src, kFPReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR2(FWIDE(kA64Frintn2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+ return true;
+}
+
+bool Arm64Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+ int32_t encoded_imm = EncodeImmSingle(bit_cast<float, uint32_t>(0.5f));
+ ArmOpcode wide = (is_double) ? FWIDE(0) : FUNWIDE(0);
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = (is_double) ? InlineTargetWide(info) : InlineTarget(info);
+ rl_src = (is_double) ? LoadValueWide(rl_src, kFPReg) : LoadValue(rl_src, kFPReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ RegStorage r_tmp = (is_double) ? AllocTempDouble() : AllocTempSingle();
+ // 0.5f and 0.5d are encoded in the same way.
+ NewLIR2(kA64Fmov2fI | wide, r_tmp.GetReg(), encoded_imm);
+ NewLIR3(kA64Fadd3fff | wide, rl_src.reg.GetReg(), rl_src.reg.GetReg(), r_tmp.GetReg());
+ NewLIR2((is_double) ? kA64Fcvtms2xS : kA64Fcvtms2ws, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ (is_double) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
+ return true;
+}
+
bool Arm64Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
DCHECK_EQ(cu_->instruction_set, kArm64);
int op = (is_min) ? kA64Fmin3fff : kA64Fmax3fff;
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 5131bd8..cd1840a 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -23,7 +23,7 @@
/* This file contains codegen for the A64 ISA. */
-static int32_t EncodeImmSingle(uint32_t bits) {
+int32_t Arm64Mir2Lir::EncodeImmSingle(uint32_t bits) {
/*
* Valid values will have the form:
*
@@ -55,7 +55,7 @@
return (bit7 | bit6 | bit5_to_0);
}
-static int32_t EncodeImmDouble(uint64_t bits) {
+int32_t Arm64Mir2Lir::EncodeImmDouble(uint64_t bits) {
/*
* Valid values will have the form:
*
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 0e46c96..7abf3e7 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -48,6 +48,11 @@
true, // kIntrinsicMinMaxFloat
true, // kIntrinsicMinMaxDouble
true, // kIntrinsicSqrt
+ true, // kIntrinsicCeil
+ true, // kIntrinsicFloor
+ true, // kIntrinsicRint
+ true, // kIntrinsicRoundFloat
+ true, // kIntrinsicRoundDouble
false, // kIntrinsicGet
false, // kIntrinsicCharAt
false, // kIntrinsicCompareTo
@@ -75,6 +80,11 @@
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], MinMaxFloat_must_be_static);
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], MinMaxDouble_must_be_static);
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSqrt], Sqrt_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicCeil], Ceil_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicFloor], Floor_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRint], Rint_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundFloat], RoundFloat_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundDouble], RoundDouble_must_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicGet], Get_must_not_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCharAt], CharAt_must_not_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCompareTo], CompareTo_must_not_be_static);
@@ -155,6 +165,10 @@
"max", // kNameCacheMax
"min", // kNameCacheMin
"sqrt", // kNameCacheSqrt
+ "ceil", // kNameCacheCeil
+ "floor", // kNameCacheFloor
+ "rint", // kNameCacheRint
+ "round", // kNameCacheRound
"get", // kNameCacheGet
"charAt", // kNameCacheCharAt
"compareTo", // kNameCacheCompareTo
@@ -314,6 +328,17 @@
INTRINSIC(JavaLangMath, Sqrt, D_D, kIntrinsicSqrt, 0),
INTRINSIC(JavaLangStrictMath, Sqrt, D_D, kIntrinsicSqrt, 0),
+ INTRINSIC(JavaLangMath, Ceil, D_D, kIntrinsicCeil, 0),
+ INTRINSIC(JavaLangStrictMath, Ceil, D_D, kIntrinsicCeil, 0),
+ INTRINSIC(JavaLangMath, Floor, D_D, kIntrinsicFloor, 0),
+ INTRINSIC(JavaLangStrictMath, Floor, D_D, kIntrinsicFloor, 0),
+ INTRINSIC(JavaLangMath, Rint, D_D, kIntrinsicRint, 0),
+ INTRINSIC(JavaLangStrictMath, Rint, D_D, kIntrinsicRint, 0),
+ INTRINSIC(JavaLangMath, Round, F_I, kIntrinsicRoundFloat, 0),
+ INTRINSIC(JavaLangStrictMath, Round, F_I, kIntrinsicRoundFloat, 0),
+ INTRINSIC(JavaLangMath, Round, D_J, kIntrinsicRoundDouble, 0),
+ INTRINSIC(JavaLangStrictMath, Round, D_J, kIntrinsicRoundDouble, 0),
+
INTRINSIC(JavaLangRefReference, Get, _Object, kIntrinsicGet, 0),
INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
@@ -436,6 +461,16 @@
return backend->GenInlinedMinMaxFP(info, intrinsic.d.data & kIntrinsicFlagMin, true /* is_double */);
case kIntrinsicSqrt:
return backend->GenInlinedSqrt(info);
+ case kIntrinsicCeil:
+ return backend->GenInlinedCeil(info);
+ case kIntrinsicFloor:
+ return backend->GenInlinedFloor(info);
+ case kIntrinsicRint:
+ return backend->GenInlinedRint(info);
+ case kIntrinsicRoundFloat:
+ return backend->GenInlinedRound(info, false /* is_double */);
+ case kIntrinsicRoundDouble:
+ return backend->GenInlinedRound(info, true /* is_double */);
case kIntrinsicGet:
return backend->GenInlinedGet(info);
case kIntrinsicCharAt:
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index cb8c165..1bd3c48 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -141,6 +141,10 @@
kNameCacheMax,
kNameCacheMin,
kNameCacheSqrt,
+ kNameCacheCeil,
+ kNameCacheFloor,
+ kNameCacheRint,
+ kNameCacheRound,
kNameCacheGet,
kNameCacheCharAt,
kNameCacheCompareTo,
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 5fc6996..8e7f6a6 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -1427,6 +1427,22 @@
return false;
}
+bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
+ return false;
+}
+
+bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
+ return false;
+}
+
+bool Mir2Lir::GenInlinedRint(CallInfo* info) {
+ return false;
+}
+
+bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+ return false;
+}
+
bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
if (cu_->instruction_set == kMips) {
// TODO - add Mips implementation
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index b19942d..70c17da 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -965,6 +965,10 @@
virtual bool GenInlinedAbsDouble(CallInfo* info) = 0;
bool GenInlinedFloatCvt(CallInfo* info);
bool GenInlinedDoubleCvt(CallInfo* info);
+ virtual bool GenInlinedCeil(CallInfo* info);
+ virtual bool GenInlinedFloor(CallInfo* info);
+ virtual bool GenInlinedRint(CallInfo* info);
+ virtual bool GenInlinedRound(CallInfo* info, bool is_double);
virtual bool GenInlinedArrayCopyCharArray(CallInfo* info);
virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
bool GenInlinedStringCompareTo(CallInfo* info);
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 982553d..23b9aed 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -48,6 +48,11 @@
kIntrinsicMinMaxFloat,
kIntrinsicMinMaxDouble,
kIntrinsicSqrt,
+ kIntrinsicCeil,
+ kIntrinsicFloor,
+ kIntrinsicRint,
+ kIntrinsicRoundFloat,
+ kIntrinsicRoundDouble,
kIntrinsicGet,
kIntrinsicCharAt,
kIntrinsicCompareTo,
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index 9ecc0a0..56972ff 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -34,6 +34,11 @@
test_Math_max_F();
test_Math_min_D();
test_Math_max_D();
+ test_Math_ceil();
+ test_Math_floor();
+ test_Math_rint();
+ test_Math_round_D();
+ test_Math_round_F();
test_Short_reverseBytes();
test_Integer_reverseBytes();
test_Long_reverseBytes();
@@ -49,6 +54,11 @@
test_StrictMath_max_F();
test_StrictMath_min_D();
test_StrictMath_max_D();
+ test_StrictMath_ceil();
+ test_StrictMath_floor();
+ test_StrictMath_rint();
+ test_StrictMath_round_D();
+ test_StrictMath_round_F();
test_String_charAt();
test_String_compareTo();
test_String_indexOf();
@@ -376,6 +386,104 @@
Assert.assertEquals(Math.max(Double.MIN_VALUE, Double.MAX_VALUE), Double.MAX_VALUE);
}
+ public static void test_Math_ceil() {
+ Assert.assertEquals(Math.ceil(+0.0), +0.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-0.0), -0.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-0.9), -0.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-0.5), -0.0d, 0.0);
+ Assert.assertEquals(Math.ceil(0.0), +0.0d, 0.0);
+ Assert.assertEquals(Math.ceil(+2.0), +2.0d, 0.0);
+ Assert.assertEquals(Math.ceil(+2.1), +3.0d, 0.0);
+ Assert.assertEquals(Math.ceil(+2.5), +3.0d, 0.0);
+ Assert.assertEquals(Math.ceil(+2.9), +3.0d, 0.0);
+ Assert.assertEquals(Math.ceil(+3.0), +3.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-2.0), -2.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-2.1), -2.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-2.5), -2.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-2.9), -2.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-3.0), -3.0d, 0.0);
+ Assert.assertEquals(Math.ceil(Double.NaN), Double.NaN, 0.0);
+ Assert.assertEquals(Math.ceil(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+ Assert.assertEquals(Math.ceil(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+ }
+
+ public static void test_Math_floor() {
+ Assert.assertEquals(Math.floor(+0.0), +0.0d, 0.0);
+ Assert.assertEquals(Math.floor(-0.0), -0.0d, 0.0);
+ Assert.assertEquals(Math.floor(+2.0), +2.0d, 0.0);
+ Assert.assertEquals(Math.floor(+2.1), +2.0d, 0.0);
+ Assert.assertEquals(Math.floor(+2.5), +2.0d, 0.0);
+ Assert.assertEquals(Math.floor(+2.9), +2.0d, 0.0);
+ Assert.assertEquals(Math.floor(+3.0), +3.0d, 0.0);
+ Assert.assertEquals(Math.floor(-2.0), -2.0d, 0.0);
+ Assert.assertEquals(Math.floor(-2.1), -3.0d, 0.0);
+ Assert.assertEquals(Math.floor(-2.5), -3.0d, 0.0);
+ Assert.assertEquals(Math.floor(-2.9), -3.0d, 0.0);
+ Assert.assertEquals(Math.floor(-3.0), -3.0d, 0.0);
+ Assert.assertEquals(Math.floor(Double.NaN), Double.NaN, 0.0);
+ Assert.assertEquals(Math.floor(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+ Assert.assertEquals(Math.floor(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+ }
+
+ public static void test_Math_rint() {
+ Assert.assertEquals(Math.rint(+0.0), +0.0d, 0.0);
+ Assert.assertEquals(Math.rint(-0.0), -0.0d, 0.0);
+ Assert.assertEquals(Math.rint(+2.0), +2.0d, 0.0);
+ Assert.assertEquals(Math.rint(+2.1), +2.0d, 0.0);
+ Assert.assertEquals(Math.rint(+2.5), +2.0d, 0.0);
+ Assert.assertEquals(Math.rint(+2.9), +3.0d, 0.0);
+ Assert.assertEquals(Math.rint(+3.0), +3.0d, 0.0);
+ Assert.assertEquals(Math.rint(-2.0), -2.0d, 0.0);
+ Assert.assertEquals(Math.rint(-2.1), -2.0d, 0.0);
+ Assert.assertEquals(Math.rint(-2.5), -2.0d, 0.0);
+ Assert.assertEquals(Math.rint(-2.9), -3.0d, 0.0);
+ Assert.assertEquals(Math.rint(-3.0), -3.0d, 0.0);
+ Assert.assertEquals(Math.rint(Double.NaN), Double.NaN, 0.0);
+ Assert.assertEquals(Math.rint(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+ Assert.assertEquals(Math.rint(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+ }
+
+ public static void test_Math_round_D() {
+ Assert.assertEquals(Math.round(+0.0d), (long)+0.0);
+ Assert.assertEquals(Math.round(-0.0d), (long)+0.0);
+ Assert.assertEquals(Math.round(2.0d), 2l);
+ Assert.assertEquals(Math.round(2.1d), 2l);
+ Assert.assertEquals(Math.round(2.5d), 3l);
+ Assert.assertEquals(Math.round(2.9d), 3l);
+ Assert.assertEquals(Math.round(3.0d), 3l);
+ Assert.assertEquals(Math.round(-2.0d), -2l);
+ Assert.assertEquals(Math.round(-2.1d), -2l);
+ Assert.assertEquals(Math.round(-2.5d), -2l);
+ Assert.assertEquals(Math.round(-2.9d), -3l);
+ Assert.assertEquals(Math.round(-3.0d), -3l);
+ Assert.assertEquals(Math.round(0.49999999999999994d), 1l);
+ Assert.assertEquals(Math.round(Double.NaN), (long)+0.0d);
+ Assert.assertEquals(Math.round(Long.MAX_VALUE + 1.0d), Long.MAX_VALUE);
+ Assert.assertEquals(Math.round(Long.MIN_VALUE - 1.0d), Long.MIN_VALUE);
+ Assert.assertEquals(Math.round(Double.POSITIVE_INFINITY), Long.MAX_VALUE);
+ Assert.assertEquals(Math.round(Double.NEGATIVE_INFINITY), Long.MIN_VALUE);
+ }
+
+ public static void test_Math_round_F() {
+ Assert.assertEquals(Math.round(+0.0f), (int)+0.0);
+ Assert.assertEquals(Math.round(-0.0f), (int)+0.0);
+ Assert.assertEquals(Math.round(2.0f), 2);
+ Assert.assertEquals(Math.round(2.1f), 2);
+ Assert.assertEquals(Math.round(2.5f), 3);
+ Assert.assertEquals(Math.round(2.9f), 3);
+ Assert.assertEquals(Math.round(3.0f), 3);
+ Assert.assertEquals(Math.round(-2.0f), -2);
+ Assert.assertEquals(Math.round(-2.1f), -2);
+ Assert.assertEquals(Math.round(-2.5f), -2);
+ Assert.assertEquals(Math.round(-2.9f), -3);
+ Assert.assertEquals(Math.round(-3.0f), -3);
+ Assert.assertEquals(Math.round(Float.NaN), (int)+0.0f);
+ Assert.assertEquals(Math.round(Integer.MAX_VALUE + 1.0f), Integer.MAX_VALUE);
+ Assert.assertEquals(Math.round(Integer.MIN_VALUE - 1.0f), Integer.MIN_VALUE);
+ Assert.assertEquals(Math.round(Float.POSITIVE_INFINITY), Integer.MAX_VALUE);
+ Assert.assertEquals(Math.round(Float.NEGATIVE_INFINITY), Integer.MIN_VALUE);
+ }
+
public static void test_StrictMath_abs_I() {
Assert.assertEquals(StrictMath.abs(0), 0);
Assert.assertEquals(StrictMath.abs(123), 123);
@@ -487,6 +595,104 @@
Assert.assertEquals(StrictMath.max(Double.MIN_VALUE, Double.MAX_VALUE), Double.MAX_VALUE);
}
+ public static void test_StrictMath_ceil() {
+ Assert.assertEquals(StrictMath.ceil(+0.0), +0.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-0.0), -0.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-0.9), -0.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-0.5), -0.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(0.0), +0.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(+2.0), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(+2.1), +3.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(+2.5), +3.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(+2.9), +3.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(+3.0), +3.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-2.0), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-2.1), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-2.5), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-2.9), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-3.0), -3.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(Double.NaN), Double.NaN, 0.0);
+ Assert.assertEquals(StrictMath.ceil(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+ Assert.assertEquals(StrictMath.ceil(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+ }
+
+ public static void test_StrictMath_floor() {
+ Assert.assertEquals(StrictMath.floor(+0.0), +0.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(-0.0), -0.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(+2.0), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(+2.1), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(+2.5), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(+2.9), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(+3.0), +3.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(-2.0), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(-2.1), -3.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(-2.5), -3.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(-2.9), -3.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(-3.0), -3.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(Double.NaN), Double.NaN, 0.0);
+ Assert.assertEquals(StrictMath.floor(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+ Assert.assertEquals(StrictMath.floor(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+ }
+
+ public static void test_StrictMath_rint() {
+ Assert.assertEquals(StrictMath.rint(+0.0), +0.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(-0.0), -0.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(+2.0), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(+2.1), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(+2.5), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(+2.9), +3.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(+3.0), +3.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(-2.0), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(-2.1), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(-2.5), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(-2.9), -3.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(-3.0), -3.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(Double.NaN), Double.NaN, 0.0);
+ Assert.assertEquals(StrictMath.rint(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+ Assert.assertEquals(StrictMath.rint(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+ }
+
+ public static void test_StrictMath_round_D() {
+ Assert.assertEquals(StrictMath.round(+0.0d), (long)+0.0);
+ Assert.assertEquals(StrictMath.round(-0.0d), (long)+0.0);
+ Assert.assertEquals(StrictMath.round(2.0d), 2l);
+ Assert.assertEquals(StrictMath.round(2.1d), 2l);
+ Assert.assertEquals(StrictMath.round(2.5d), 3l);
+ Assert.assertEquals(StrictMath.round(2.9d), 3l);
+ Assert.assertEquals(StrictMath.round(3.0d), 3l);
+ Assert.assertEquals(StrictMath.round(-2.0d), -2l);
+ Assert.assertEquals(StrictMath.round(-2.1d), -2l);
+ Assert.assertEquals(StrictMath.round(-2.5d), -2l);
+ Assert.assertEquals(StrictMath.round(-2.9d), -3l);
+ Assert.assertEquals(StrictMath.round(-3.0d), -3l);
+ Assert.assertEquals(StrictMath.round(0.49999999999999994d), 1l);
+ Assert.assertEquals(StrictMath.round(Double.NaN), (long)+0.0d);
+ Assert.assertEquals(StrictMath.round(Long.MAX_VALUE + 1.0d), Long.MAX_VALUE);
+ Assert.assertEquals(StrictMath.round(Long.MIN_VALUE - 1.0d), Long.MIN_VALUE);
+ Assert.assertEquals(StrictMath.round(Double.POSITIVE_INFINITY), Long.MAX_VALUE);
+ Assert.assertEquals(StrictMath.round(Double.NEGATIVE_INFINITY), Long.MIN_VALUE);
+ }
+
+ public static void test_StrictMath_round_F() {
+ Assert.assertEquals(StrictMath.round(+0.0f), (int)+0.0);
+ Assert.assertEquals(StrictMath.round(-0.0f), (int)+0.0);
+ Assert.assertEquals(StrictMath.round(2.0f), 2);
+ Assert.assertEquals(StrictMath.round(2.1f), 2);
+ Assert.assertEquals(StrictMath.round(2.5f), 3);
+ Assert.assertEquals(StrictMath.round(2.9f), 3);
+ Assert.assertEquals(StrictMath.round(3.0f), 3);
+ Assert.assertEquals(StrictMath.round(-2.0f), -2);
+ Assert.assertEquals(StrictMath.round(-2.1f), -2);
+ Assert.assertEquals(StrictMath.round(-2.5f), -2);
+ Assert.assertEquals(StrictMath.round(-2.9f), -3);
+ Assert.assertEquals(StrictMath.round(-3.0f), -3);
+ Assert.assertEquals(StrictMath.round(Float.NaN), (int)+0.0f);
+ Assert.assertEquals(StrictMath.round(Integer.MAX_VALUE + 1.0f), Integer.MAX_VALUE);
+ Assert.assertEquals(StrictMath.round(Integer.MIN_VALUE - 1.0f), Integer.MIN_VALUE);
+ Assert.assertEquals(StrictMath.round(Float.POSITIVE_INFINITY), Integer.MAX_VALUE);
+ Assert.assertEquals(StrictMath.round(Float.NEGATIVE_INFINITY), Integer.MIN_VALUE);
+ }
+
public static void test_Float_floatToRawIntBits() {
Assert.assertEquals(Float.floatToRawIntBits(-1.0f), 0xbf800000);
Assert.assertEquals(Float.floatToRawIntBits(0.0f), 0);