Merge "[MIPS] Fix several profiling issues caused by incorrect register saves"
diff --git a/vm/compiler/codegen/mips/mips/CallingConvention.S b/vm/compiler/codegen/mips/mips/CallingConvention.S
index ab97655..cfe2695 100644
--- a/vm/compiler/codegen/mips/mips/CallingConvention.S
+++ b/vm/compiler/codegen/mips/mips/CallingConvention.S
@@ -24,9 +24,17 @@
.global dvmJitCalleeSave
.type dvmJitCalleeSave, %function
dvmJitCalleeSave:
+#ifdef __mips_hard_float
+ /* For performance reasons, the JIT does not use any callee-saved */
+ /* fp registers, so there is nothing to save here. */
+#endif
jr $31
.global dvmJitCalleeRestore
.type dvmJitCalleeRestore, %function
dvmJitCalleeRestore:
+#ifdef __mips_hard_float
+ /* For performance reasons, the JIT does not use any callee-saved */
+ /* fp registers, so there is nothing to restore here. */
+#endif
jr $31
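(Illustration of the tradeoff noted in the comments above, not part of the patch: if the JIT did allocate the o32 callee-saved fp registers, these stubs could not be bare returns.  The sketch below assumes a hypothetical save area passed in $a0; no such buffer exists in the real code.)

    # hypothetical dvmJitCalleeSave body if $f20-$f30 were in use
    sdc1    $f20,  0($a0)
    sdc1    $f22,  8($a0)
    sdc1    $f24, 16($a0)
    sdc1    $f26, 24($a0)
    sdc1    $f28, 32($a0)
    sdc1    $f30, 40($a0)
    jr      $31
    # ...with matching ldc1 reloads in dvmJitCalleeRestore.  Skipping this
    # spill/reload on every call is the performance rationale stated above.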
diff --git a/vm/mterp/mips/OP_CMPL_DOUBLE.S b/vm/mterp/mips/OP_CMPL_DOUBLE.S
index 63bb005..2c824b3 100644
--- a/vm/mterp/mips/OP_CMPL_DOUBLE.S
+++ b/vm/mterp/mips/OP_CMPL_DOUBLE.S
@@ -36,15 +36,15 @@
LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vBB/vBB+1
b ${opcode}_continue
#else
- LOAD64_F(fs0, fs0f, rOBJ)
- LOAD64_F(fs1, fs1f, rBIX)
- c.olt.d fcc0, fs0, fs1
+ LOAD64_F(ft0, ft0f, rOBJ)
+ LOAD64_F(ft1, ft1f, rBIX)
+ c.olt.d fcc0, ft0, ft1 # Is ft0 < ft1
li rTEMP, -1
bc1t fcc0, ${opcode}_finish
- c.olt.d fcc0, fs1, fs0
+ c.olt.d fcc0, ft1, ft0 # Is ft1 < ft0
li rTEMP, 1
bc1t fcc0, ${opcode}_finish
- c.eq.d fcc0, fs0, fs1
+ c.eq.d fcc0, ft0, ft1 # Is ft0 == ft1
li rTEMP, 0
bc1t fcc0, ${opcode}_finish
b ${opcode}_nan
diff --git a/vm/mterp/mips/OP_CMPL_FLOAT.S b/vm/mterp/mips/OP_CMPL_FLOAT.S
index 6e07084..01db920 100644
--- a/vm/mterp/mips/OP_CMPL_FLOAT.S
+++ b/vm/mterp/mips/OP_CMPL_FLOAT.S
@@ -45,15 +45,15 @@
move a1, rBIX # a1 <- vCC
b ${opcode}_continue
#else
- GET_VREG_F(fs0, a2)
- GET_VREG_F(fs1, a3)
- c.olt.s fcc0, fs0, fs1 # Is fs0 < fs1
+ GET_VREG_F(ft0, a2)
+ GET_VREG_F(ft1, a3)
+ c.olt.s fcc0, ft0, ft1 # Is ft0 < ft1
li rTEMP, -1
bc1t fcc0, ${opcode}_finish
- c.olt.s fcc0, fs1, fs0
+ c.olt.s fcc0, ft1, ft0 # Is ft1 < ft0
li rTEMP, 1
bc1t fcc0, ${opcode}_finish
- c.eq.s fcc0, fs0, fs1
+ c.eq.s fcc0, ft0, ft1 # Is ft0 == ft1
li rTEMP, 0
bc1t fcc0, ${opcode}_finish
b ${opcode}_nan
diff --git a/vm/mterp/out/InterpAsm-mips.S b/vm/mterp/out/InterpAsm-mips.S
index dd43f1f..67cbdab 100644
--- a/vm/mterp/out/InterpAsm-mips.S
+++ b/vm/mterp/out/InterpAsm-mips.S
@@ -1542,15 +1542,15 @@
move a1, rBIX # a1 <- vCC
b OP_CMPL_FLOAT_continue
#else
- GET_VREG_F(fs0, a2)
- GET_VREG_F(fs1, a3)
- c.olt.s fcc0, fs0, fs1 # Is fs0 < fs1
+ GET_VREG_F(ft0, a2)
+ GET_VREG_F(ft1, a3)
+ c.olt.s fcc0, ft0, ft1 # Is ft0 < ft1
li rTEMP, -1
bc1t fcc0, OP_CMPL_FLOAT_finish
- c.olt.s fcc0, fs1, fs0
+ c.olt.s fcc0, ft1, ft0 # Is ft1 < ft0
li rTEMP, 1
bc1t fcc0, OP_CMPL_FLOAT_finish
- c.eq.s fcc0, fs0, fs1
+ c.eq.s fcc0, ft0, ft1 # Is ft0 == ft1
li rTEMP, 0
bc1t fcc0, OP_CMPL_FLOAT_finish
b OP_CMPL_FLOAT_nan
@@ -1605,15 +1605,15 @@
move a1, rBIX # a1 <- vCC
b OP_CMPG_FLOAT_continue
#else
- GET_VREG_F(fs0, a2)
- GET_VREG_F(fs1, a3)
- c.olt.s fcc0, fs0, fs1 # Is fs0 < fs1
+ GET_VREG_F(ft0, a2)
+ GET_VREG_F(ft1, a3)
+ c.olt.s fcc0, ft0, ft1 # Is ft0 < ft1
li rTEMP, -1
bc1t fcc0, OP_CMPG_FLOAT_finish
- c.olt.s fcc0, fs1, fs0
+ c.olt.s fcc0, ft1, ft0 # Is ft1 < ft0
li rTEMP, 1
bc1t fcc0, OP_CMPG_FLOAT_finish
- c.eq.s fcc0, fs0, fs1
+ c.eq.s fcc0, ft0, ft1 # Is ft0 == ft1
li rTEMP, 0
bc1t fcc0, OP_CMPG_FLOAT_finish
b OP_CMPG_FLOAT_nan
@@ -1659,15 +1659,15 @@
LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vBB/vBB+1
b OP_CMPL_DOUBLE_continue
#else
- LOAD64_F(fs0, fs0f, rOBJ)
- LOAD64_F(fs1, fs1f, rBIX)
- c.olt.d fcc0, fs0, fs1
+ LOAD64_F(ft0, ft0f, rOBJ)
+ LOAD64_F(ft1, ft1f, rBIX)
+ c.olt.d fcc0, ft0, ft1 # Is ft0 < ft1
li rTEMP, -1
bc1t fcc0, OP_CMPL_DOUBLE_finish
- c.olt.d fcc0, fs1, fs0
+ c.olt.d fcc0, ft1, ft0 # Is ft1 < ft0
li rTEMP, 1
bc1t fcc0, OP_CMPL_DOUBLE_finish
- c.eq.d fcc0, fs0, fs1
+ c.eq.d fcc0, ft0, ft1 # Is ft0 == ft1
li rTEMP, 0
bc1t fcc0, OP_CMPL_DOUBLE_finish
b OP_CMPL_DOUBLE_nan
@@ -1711,15 +1711,15 @@
LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vBB/vBB+1
b OP_CMPG_DOUBLE_continue
#else
- LOAD64_F(fs0, fs0f, rOBJ)
- LOAD64_F(fs1, fs1f, rBIX)
- c.olt.d fcc0, fs0, fs1
+ LOAD64_F(ft0, ft0f, rOBJ)
+ LOAD64_F(ft1, ft1f, rBIX)
+ c.olt.d fcc0, ft0, ft1 # Is ft0 < ft1
li rTEMP, -1
bc1t fcc0, OP_CMPG_DOUBLE_finish
- c.olt.d fcc0, fs1, fs0
+ c.olt.d fcc0, ft1, ft0 # Is ft1 < ft0
li rTEMP, 1
bc1t fcc0, OP_CMPG_DOUBLE_finish
- c.eq.d fcc0, fs0, fs1
+ c.eq.d fcc0, ft0, ft1 # Is ft0 == ft1
li rTEMP, 0
bc1t fcc0, OP_CMPG_DOUBLE_finish
b OP_CMPG_DOUBLE_nan