Fix art test failures for Mips.

This patch fixes the following art test failures for Mips:
003-omnibus-opcodes
030-bad-finalizer
041-narrowing
059-finalizer-throw

The Mips float/long conversion entrypoints now point at art_l2d, art_l2f,
art_d2i and art_f2i instead of the libgcc builtins, which do not implement
Java's saturating narrowing semantics. Volatile 64-bit field accesses on
Mips are routed through two new quick entrypoints, pA64Load and pA64Store,
backed by QuasiAtomic::Read64 and QuasiAtomic::Write64; with those in place
the per-target SupportsVolatileLoadStore() check is no longer needed and is
removed from all backends.
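
As a rough illustration of why the conversion helpers matter for
041-narrowing (the cases below are illustrative and assumed, not quoted
from the test), Java requires saturating floating-point-to-integer
narrowing:

    int a = (int) Double.NaN;                // must be 0
    int b = (int) Double.POSITIVE_INFINITY;  // must be Integer.MAX_VALUE
    int c = (int) -1.0e300;                  // must be Integer.MIN_VALUE

The libgcc __fixdfsi/__fixsfsi routines do not guarantee these results;
art_d2i and art_f2i clamp NaN and out-of-range values as the spec requires.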

Change-Id: I4e0e9ff75f949c92059dd6b8d579450dc15f4467
Signed-off-by: Douglas Leung <douglas@mips.com>
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index d4b0de7..582af51 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -85,8 +85,6 @@
     size_t GetInsnSize(LIR* lir) OVERRIDE;
     bool IsUnconditionalBranch(LIR* lir);
 
-    // Check support for volatile load/store of a given size.
-    bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
     // Get the register class for load/store of a field.
     RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
 
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index ef94bbc..8cc7596 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -538,10 +538,6 @@
   return ((lir->opcode == kThumbBUncond) || (lir->opcode == kThumb2BUncond));
 }
 
-bool ArmMir2Lir::SupportsVolatileLoadStore(OpSize size) {
-  return true;
-}
-
 RegisterClass ArmMir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
   if (UNLIKELY(is_volatile)) {
     // On arm, atomic 64-bit load/store requires a core register pair.
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index de97653..c62b4fe 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -146,8 +146,6 @@
     size_t GetInsnSize(LIR* lir) OVERRIDE;
     bool IsUnconditionalBranch(LIR* lir);
 
-    // Check support for volatile load/store of a given size.
-    bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
     // Get the register class for load/store of a field.
     RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
 
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 6a27ad0..c3bbb44 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -569,10 +569,6 @@
   return (lir->opcode == kA64B1t);
 }
 
-bool Arm64Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
-  return true;
-}
-
 RegisterClass Arm64Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
   if (UNLIKELY(is_volatile)) {
     // On arm64, fp register load/store is atomic only for single bytes.
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 1fc0cff..5d1b1fb 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -581,8 +581,7 @@
   const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
   cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
   OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
-  if (!SLOW_FIELD_PATH && field_info.FastPut() &&
-      (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) {
+  if (!SLOW_FIELD_PATH && field_info.FastPut()) {
     DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
     RegStorage r_base;
     if (field_info.IsReferrersClass()) {
@@ -682,8 +681,7 @@
   const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
   cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
   OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
-  if (!SLOW_FIELD_PATH && field_info.FastGet() &&
-      (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) {
+  if (!SLOW_FIELD_PATH && field_info.FastGet()) {
     DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
     RegStorage r_base;
     if (field_info.IsReferrersClass()) {
@@ -787,6 +785,8 @@
       is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Instance)
           : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjInstance)
               : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Instance));
+  // Second argument of pGetXXInstance is always a reference.
+  DCHECK_EQ(static_cast<unsigned int>(rl_obj.wide), 0U);
   mir_to_lir->CallRuntimeHelperImmRegLocation(getter_offset, field_info->FieldIndex(), rl_obj,
                                               true);
 }
@@ -797,8 +797,7 @@
   const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
   cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
   OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
-  if (!SLOW_FIELD_PATH && field_info.FastGet() &&
-      (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) {
+  if (!SLOW_FIELD_PATH && field_info.FastGet()) {
     RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
     DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
     rl_obj = LoadValue(rl_obj, kRefReg);
@@ -854,8 +853,7 @@
   const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
   cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
   OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
-  if (!SLOW_FIELD_PATH && field_info.FastPut() &&
-      (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) {
+  if (!SLOW_FIELD_PATH && field_info.FastPut()) {
     RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
     DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
     rl_obj = LoadValue(rl_obj, kRefReg);
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 2c33377..4a06086 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -47,6 +47,8 @@
                           OpSize size) OVERRIDE;
     LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
                               RegStorage r_src, OpSize size) OVERRIDE;
+    LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
+    LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
     void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
 
     // Required for target - register utilities.
@@ -83,8 +85,6 @@
     size_t GetInsnSize(LIR* lir) OVERRIDE;
     bool IsUnconditionalBranch(LIR* lir);
 
-    // Check support for volatile load/store of a given size.
-    bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
     // Get the register class for load/store of a field.
     RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
 
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index a5b7824..4ba94c4 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -496,6 +496,39 @@
   return inst;
 }
 
+LIR* MipsMir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest) {
+  DCHECK(!r_dest.IsFloat());  // See RegClassForFieldLoadStore().
+  DCHECK(r_dest.IsPair());
+  ClobberCallerSave();
+  LockCallTemps();  // Using fixed registers
+  RegStorage reg_ptr = TargetReg(kArg0);
+  OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
+  RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pA64Load));
+  LIR *ret = OpReg(kOpBlx, r_tgt);
+  RegStorage reg_ret = RegStorage::MakeRegPair(TargetReg(kRet0), TargetReg(kRet1));
+  OpRegCopyWide(r_dest, reg_ret);
+  return ret;
+}
+
+LIR* MipsMir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) {
+  DCHECK(!r_src.IsFloat());  // See RegClassForFieldLoadStore().
+  DCHECK(r_src.IsPair());
+  ClobberCallerSave();
+  LockCallTemps();  // Using fixed registers
+  RegStorage temp_ptr = AllocTemp();
+  OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
+  RegStorage temp_value = AllocTempWide();
+  OpRegCopyWide(temp_value, r_src);
+  RegStorage reg_ptr = TargetReg(kArg0);
+  OpRegCopy(reg_ptr, temp_ptr);
+  RegStorage reg_value = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
+  OpRegCopyWide(reg_value, temp_value);
+  FreeTemp(temp_ptr);
+  FreeTemp(temp_value);
+  RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pA64Store));
+  return OpReg(kOpBlx, r_tgt);
+}
+
 void MipsMir2Lir::SpillCoreRegs() {
   if (num_core_spills_ == 0) {
     return;
@@ -530,17 +563,12 @@
   return (lir->opcode == kMipsB);
 }
 
-bool MipsMir2Lir::SupportsVolatileLoadStore(OpSize size) {
-  // No support for 64-bit atomic load/store on mips.
-  return size != k64 && size != kDouble;
-}
-
 RegisterClass MipsMir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
   if (UNLIKELY(is_volatile)) {
-    // On Mips, atomic 64-bit load/store requires an fp register.
+    // On Mips, atomic 64-bit load/store requires a core register.
     // Smaller aligned load/store is atomic for both core and fp registers.
     if (size == k64 || size == kDouble) {
-      return kFPReg;
+      return kCoreReg;
     }
   }
   // TODO: Verify that both core and fp registers are suitable for smaller sizes.
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 75d3c5d..0e8188b 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -551,8 +551,9 @@
 
 LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                                OpSize size, VolatileKind is_volatile) {
-  if (is_volatile == kVolatile) {
-    DCHECK(size != k64 && size != kDouble);
+  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
+    // Do atomic 64-bit load.
+    return GenAtomic64Load(r_base, displacement, r_dest);
   }
 
   // TODO: base this on target.
@@ -654,17 +655,21 @@
 LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                                 OpSize size, VolatileKind is_volatile) {
   if (is_volatile == kVolatile) {
-    DCHECK(size != k64 && size != kDouble);
     // Ensure that prior accesses become visible to other threads first.
     GenMemBarrier(kAnyStore);
   }
 
-  // TODO: base this on target.
-  if (size == kWord) {
-    size = k32;
-  }
   LIR* store;
-  store = StoreBaseDispBody(r_base, displacement, r_src, size);
+  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
+    // Do atomic 64-bit load.
+    store = GenAtomic64Store(r_base, displacement, r_src);
+  } else {
+    // TODO: base this on target.
+    if (size == kWord) {
+      size = k32;
+    }
+    store = StoreBaseDispBody(r_base, displacement, r_src, size);
+  }
 
   if (UNLIKELY(is_volatile == kVolatile)) {
     // Preserve order with respect to any subsequent volatile loads.
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 07c615f..b1f0b13 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -227,9 +227,6 @@
   bool wide = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE));
   bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT));
   OpSize size = LoadStoreOpSize(wide, ref);
-  if (data.is_volatile && !SupportsVolatileLoadStore(size)) {
-    return false;
-  }
 
   // Point of no return - no aborts after this
   GenPrintLabel(mir);
@@ -274,9 +271,6 @@
   bool wide = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE));
   bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT));
   OpSize size = LoadStoreOpSize(wide, ref);
-  if (data.is_volatile && !SupportsVolatileLoadStore(size)) {
-    return false;
-  }
 
   // Point of no return - no aborts after this
   GenPrintLabel(mir);
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index c68ad6b..33e980f 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1271,8 +1271,6 @@
     virtual size_t GetInsnSize(LIR* lir) = 0;
     virtual bool IsUnconditionalBranch(LIR* lir) = 0;
 
-    // Check support for volatile load/store of a given size.
-    virtual bool SupportsVolatileLoadStore(OpSize size) = 0;
     // Get the register class for load/store of a field.
     virtual RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) = 0;
 
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index f4fa1b4..1e80247 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -144,8 +144,6 @@
   size_t GetInsnSize(LIR* lir) OVERRIDE;
   bool IsUnconditionalBranch(LIR* lir);
 
-  // Check support for volatile load/store of a given size.
-  bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
   // Get the register class for load/store of a field.
   RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
 
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index bb1f379..1e4494b 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -755,10 +755,6 @@
   return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
 }
 
-bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
-  return true;
-}
-
 RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
   // X86_64 can handle any size.
   if (cu_->target64) {
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 1444ca0..8aa7b76 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -189,7 +189,7 @@
   EXPECT_EQ(80U, sizeof(OatHeader));
   EXPECT_EQ(8U, sizeof(OatMethodOffsets));
   EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
-  EXPECT_EQ(77 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
+  EXPECT_EQ(79 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
 }
 
 TEST_F(OatTest, OatHeaderIsValid) {
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 7a2e961..d3e7d5e 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -21,6 +21,7 @@
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/entrypoint_utils.h"
 #include "entrypoints/math_entrypoints.h"
+#include "atomic.h"
 
 namespace art {
 
@@ -196,11 +197,11 @@
   qpoints->pCmplDouble = CmplDouble;
   qpoints->pCmplFloat = CmplFloat;
   qpoints->pFmod = fmod;
-  qpoints->pL2d = __floatdidf;
+  qpoints->pL2d = art_l2d;
   qpoints->pFmodf = fmodf;
-  qpoints->pL2f = __floatdisf;
-  qpoints->pD2iz = __fixdfsi;
-  qpoints->pF2iz = __fixsfsi;
+  qpoints->pL2f = art_l2f;
+  qpoints->pD2iz = art_d2i;
+  qpoints->pF2iz = art_f2i;
   qpoints->pIdivmod = NULL;
   qpoints->pD2l = art_d2l;
   qpoints->pF2l = art_f2l;
@@ -236,6 +237,10 @@
   qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
   qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
   qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
+
+  // Atomic 64-bit load/store
+  qpoints->pA64Load = QuasiAtomic::Read64;
+  qpoints->pA64Store = QuasiAtomic::Write64;
 };
 
 }  // namespace art
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 032f6be..473687c 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -138,6 +138,10 @@
   void (*pThrowNoSuchMethod)(int32_t);
   void (*pThrowNullPointer)();
   void (*pThrowStackOverflow)(void*);
+
+  // Atomic 64-bit load/store
+  int64_t (*pA64Load)(volatile const int64_t *);
+  void (*pA64Store)(volatile int64_t *, int64_t);
 };
 
 
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 79c68a2..ae1b94f 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -259,8 +259,10 @@
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowDivZero, pThrowNoSuchMethod, kPointerSize);
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNoSuchMethod, pThrowNullPointer, kPointerSize);
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNullPointer, pThrowStackOverflow, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowStackOverflow, pA64Load, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pA64Load, pA64Store, kPointerSize);
 
-    CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pThrowStackOverflow)
+    CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pA64Store)
             + kPointerSize == sizeof(QuickEntryPoints), QuickEntryPoints_all);
   }
 };
diff --git a/runtime/thread.cc b/runtime/thread.cc
index dcd4f23..dd55ed1 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1879,6 +1879,8 @@
   QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod)
   QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
   QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
+  QUICK_ENTRY_POINT_INFO(pA64Load)
+  QUICK_ENTRY_POINT_INFO(pA64Store)
 #undef QUICK_ENTRY_POINT_INFO
 
   os << offset;