Merge "Add missing field init in constructors"
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 43fbcbd..2173253 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -29,15 +29,18 @@
  protected:
   class InToRegStorageMipsMapper : public InToRegStorageMapper {
    public:
-    explicit InToRegStorageMipsMapper(Mir2Lir* m2l) : m2l_(m2l), cur_core_reg_(0) {}
+    explicit InToRegStorageMipsMapper(Mir2Lir* m2l)
+        : m2l_(m2l), cur_core_reg_(0), cur_fpu_reg_(0) {}
     virtual RegStorage GetNextReg(ShortyArg arg);
     virtual void Reset() OVERRIDE {
       cur_core_reg_ = 0;
+      cur_fpu_reg_ = 0;
     }
    protected:
     Mir2Lir* m2l_;
    private:
     size_t cur_core_reg_;
+    size_t cur_fpu_reg_;
   };
 
   class InToRegStorageMips64Mapper : public InToRegStorageMapper {
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index ec4bad7..09d37f8 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -195,7 +195,7 @@
 // Return a target-dependent special register.
 RegStorage MipsMir2Lir::TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
   if (!cu_->target64 && wide_kind == kWide) {
-    DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
+    DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 == reg) || (kFArg2 == reg) || (kRet0 == reg));
     RegStorage ret_reg = RegStorage::MakeRegPair(TargetReg(reg),
                                      TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
     if (!fpuIs32Bit_ && ret_reg.IsFloat()) {
@@ -250,14 +250,27 @@
 RegStorage MipsMir2Lir::InToRegStorageMipsMapper::GetNextReg(ShortyArg arg) {
   const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3};
   const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
+  const SpecialTargetRegister fpuArgMappingToPhysicalReg[] = {kFArg0, kFArg2};
+  const size_t fpuArgMappingToPhysicalRegSize = arraysize(fpuArgMappingToPhysicalReg);
 
   RegStorage result = RegStorage::InvalidReg();
-  if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
-    result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++],
-                             arg.IsRef() ? kRef : kNotWide);
-    if (arg.IsWide() && cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
-      result = RegStorage::MakeRegPair(
-          result, m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], kNotWide));
+  if (arg.IsFP()) {
+    if (cur_fpu_reg_ < fpuArgMappingToPhysicalRegSize) {
+      result = m2l_->TargetReg(fpuArgMappingToPhysicalReg[cur_fpu_reg_++],
+                               arg.IsWide() ? kWide : kNotWide);
+    }
+  } else {
+    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
+      if (arg.IsWide() && cur_core_reg_ == 0) {
+        // Don't use a1-a2 as a register pair; move to a2-a3 instead.
+        cur_core_reg_++;
+      }
+      result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++],
+                               arg.IsRef() ? kRef : kNotWide);
+      if (arg.IsWide() && cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
+        result = RegStorage::MakeRegPair(
+            result, m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], kNotWide));
+      }
     }
   }
   return result;
@@ -654,6 +667,20 @@
     LockTemp(TargetReg(kArg5));
     LockTemp(TargetReg(kArg6));
     LockTemp(TargetReg(kArg7));
+  } else {
+    if (fpuIs32Bit_) {
+      LockTemp(TargetReg(kFArg0));
+      LockTemp(TargetReg(kFArg1));
+      LockTemp(TargetReg(kFArg2));
+      LockTemp(TargetReg(kFArg3));
+      LockTemp(rs_rD6_fr0);
+      LockTemp(rs_rD7_fr0);
+    } else {
+      LockTemp(TargetReg(kFArg0));
+      LockTemp(TargetReg(kFArg2));
+      LockTemp(rs_rD6_fr1);
+      LockTemp(rs_rD7_fr1);
+    }
   }
 }
 
@@ -668,6 +695,20 @@
     FreeTemp(TargetReg(kArg5));
     FreeTemp(TargetReg(kArg6));
     FreeTemp(TargetReg(kArg7));
+  } else {
+    if (fpuIs32Bit_) {
+      FreeTemp(TargetReg(kFArg0));
+      FreeTemp(TargetReg(kFArg1));
+      FreeTemp(TargetReg(kFArg2));
+      FreeTemp(TargetReg(kFArg3));
+      FreeTemp(rs_rD6_fr0);
+      FreeTemp(rs_rD7_fr0);
+    } else {
+      FreeTemp(TargetReg(kFArg0));
+      FreeTemp(TargetReg(kFArg2));
+      FreeTemp(rs_rD6_fr1);
+      FreeTemp(rs_rD7_fr1);
+    }
   }
   FreeTemp(TargetReg(kHiddenArg));
 }
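
For reference, a standalone C++ sketch (invented names, not ART code) of the o32 argument mapping GetNextReg implements above: floats and doubles take F12 then F14, a long skips A1 so it lands in the A2/A3 pair, and everything else goes A1..A3 and then to the stack.

// Minimal sketch of the o32 mapping; O32ArgMapper and its members are invented.
#include <cstdio>

struct O32ArgMapper {
  unsigned cur_core = 0;  // indexes {A1, A2, A3}
  unsigned cur_fpu = 0;   // indexes {F12, F14}

  const char* Next(char shorty_char) {
    static const char* kCore[] = {"A1", "A2", "A3"};
    static const char* kFpu[] = {"F12", "F14"};
    if (shorty_char == 'F' || shorty_char == 'D') {
      return cur_fpu < 2 ? kFpu[cur_fpu++] : "stack";
    }
    if (shorty_char == 'J') {
      if (cur_core == 0) cur_core = 1;  // don't pair A1/A2; use A2/A3 instead
      if (cur_core + 1 < 3) {
        cur_core += 2;
        return "A2/A3";
      }
      cur_core = 3;
      return "stack";
    }
    return cur_core < 3 ? kCore[cur_core++] : "stack";
  }
};

int main() {
  O32ArgMapper m;
  for (const char* p = "JDIF"; *p != '\0'; ++p) {
    printf("%c -> %s\n", *p, m.Next(*p));  // J -> A2/A3, D -> F12, I -> stack, F -> F14
  }
}
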
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
index dd4496f..8b5fdc3 100644
--- a/compiler/jni/jni_cfi_test_expected.inc
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -329,17 +329,18 @@
     0x34, 0x00, 0xAF, 0xAF, 0x30, 0x00, 0xAE, 0xAF, 0x2C, 0x00, 0xAD, 0xAF,
     0x28, 0x00, 0xAC, 0xAF, 0x24, 0x00, 0xAB, 0xAF, 0x20, 0x00, 0xAA, 0xAF,
     0x1C, 0x00, 0xA9, 0xAF, 0x18, 0x00, 0xA8, 0xAF, 0x00, 0x00, 0xA4, 0xAF,
-    0x44, 0x00, 0xA5, 0xAF, 0x48, 0x00, 0xA6, 0xAF, 0x4C, 0x00, 0xA7, 0xAF,
-    0xE0, 0xFF, 0xBD, 0x27, 0x20, 0x00, 0xBD, 0x27, 0x18, 0x00, 0xA8, 0x8F,
-    0x1C, 0x00, 0xA9, 0x8F, 0x20, 0x00, 0xAA, 0x8F, 0x24, 0x00, 0xAB, 0x8F,
-    0x28, 0x00, 0xAC, 0x8F, 0x2C, 0x00, 0xAD, 0x8F, 0x30, 0x00, 0xAE, 0x8F,
-    0x34, 0x00, 0xAF, 0x8F, 0x38, 0x00, 0xB8, 0x8F, 0x3C, 0x00, 0xBF, 0x8F,
-    0x40, 0x00, 0xBD, 0x27, 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
+    0x44, 0x00, 0xA5, 0xAF, 0x48, 0x00, 0xAC, 0xE7, 0x4C, 0x00, 0xA6, 0xAF,
+    0x50, 0x00, 0xA7, 0xAF, 0xE0, 0xFF, 0xBD, 0x27, 0x20, 0x00, 0xBD, 0x27,
+    0x18, 0x00, 0xA8, 0x8F, 0x1C, 0x00, 0xA9, 0x8F, 0x20, 0x00, 0xAA, 0x8F,
+    0x24, 0x00, 0xAB, 0x8F, 0x28, 0x00, 0xAC, 0x8F, 0x2C, 0x00, 0xAD, 0x8F,
+    0x30, 0x00, 0xAE, 0x8F, 0x34, 0x00, 0xAF, 0x8F, 0x38, 0x00, 0xB8, 0x8F,
+    0x3C, 0x00, 0xBF, 0x8F, 0x40, 0x00, 0xBD, 0x27, 0x09, 0x00, 0xE0, 0x03,
+    0x00, 0x00, 0x00, 0x00,
 };
 static constexpr uint8_t expected_cfi_kMips[] = {
     0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x98, 0x02, 0x44, 0x8F, 0x03,
     0x44, 0x8E, 0x04, 0x44, 0x8D, 0x05, 0x44, 0x8C, 0x06, 0x44, 0x8B, 0x07,
-    0x44, 0x8A, 0x08, 0x44, 0x89, 0x09, 0x44, 0x88, 0x0A, 0x54, 0x0E, 0x60,
+    0x44, 0x8A, 0x08, 0x44, 0x89, 0x09, 0x44, 0x88, 0x0A, 0x58, 0x0E, 0x60,
     0x44, 0x0E, 0x40, 0x0A, 0x44, 0xC8, 0x44, 0xC9, 0x44, 0xCA, 0x44, 0xCB,
     0x44, 0xCC, 0x44, 0xCD, 0x44, 0xCE, 0x44, 0xCF, 0x44, 0xD8, 0x44, 0xDF,
     0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
@@ -368,39 +369,40 @@
 // 0x0000002c: .cfi_offset: r8 at cfa-40
 // 0x0000002c: sw r4, +0(r29)
 // 0x00000030: sw r5, +68(r29)
-// 0x00000034: sw r6, +72(r29)
-// 0x00000038: sw r7, +76(r29)
-// 0x0000003c: addiu r29, r29, -32
-// 0x00000040: .cfi_def_cfa_offset: 96
-// 0x00000040: addiu r29, r29, 32
-// 0x00000044: .cfi_def_cfa_offset: 64
-// 0x00000044: .cfi_remember_state
-// 0x00000044: lw r8, +24(r29)
-// 0x00000048: .cfi_restore: r8
-// 0x00000048: lw r9, +28(r29)
-// 0x0000004c: .cfi_restore: r9
-// 0x0000004c: lw r10, +32(r29)
-// 0x00000050: .cfi_restore: r10
-// 0x00000050: lw r11, +36(r29)
-// 0x00000054: .cfi_restore: r11
-// 0x00000054: lw r12, +40(r29)
-// 0x00000058: .cfi_restore: r12
-// 0x00000058: lw r13, +44(r29)
-// 0x0000005c: .cfi_restore: r13
-// 0x0000005c: lw r14, +48(r29)
-// 0x00000060: .cfi_restore: r14
-// 0x00000060: lw r15, +52(r29)
-// 0x00000064: .cfi_restore: r15
-// 0x00000064: lw r24, +56(r29)
-// 0x00000068: .cfi_restore: r24
-// 0x00000068: lw r31, +60(r29)
-// 0x0000006c: .cfi_restore: r31
-// 0x0000006c: addiu r29, r29, 64
-// 0x00000070: .cfi_def_cfa_offset: 0
-// 0x00000070: jr r31
-// 0x00000074: nop
-// 0x00000078: .cfi_restore_state
-// 0x00000078: .cfi_def_cfa_offset: 64
+// 0x00000034: swc1 f12, +72(r29)
+// 0x00000038: sw r6, +76(r29)
+// 0x0000003c: sw r7, +80(r29)
+// 0x00000040: addiu r29, r29, -32
+// 0x00000044: .cfi_def_cfa_offset: 96
+// 0x00000044: addiu r29, r29, 32
+// 0x00000048: .cfi_def_cfa_offset: 64
+// 0x00000048: .cfi_remember_state
+// 0x00000048: lw r8, +24(r29)
+// 0x0000004c: .cfi_restore: r8
+// 0x0000004c: lw r9, +28(r29)
+// 0x00000050: .cfi_restore: r9
+// 0x00000050: lw r10, +32(r29)
+// 0x00000054: .cfi_restore: r10
+// 0x00000054: lw r11, +36(r29)
+// 0x00000058: .cfi_restore: r11
+// 0x00000058: lw r12, +40(r29)
+// 0x0000005c: .cfi_restore: r12
+// 0x0000005c: lw r13, +44(r29)
+// 0x00000060: .cfi_restore: r13
+// 0x00000060: lw r14, +48(r29)
+// 0x00000064: .cfi_restore: r14
+// 0x00000064: lw r15, +52(r29)
+// 0x00000068: .cfi_restore: r15
+// 0x00000068: lw r24, +56(r29)
+// 0x0000006c: .cfi_restore: r24
+// 0x0000006c: lw r31, +60(r29)
+// 0x00000070: .cfi_restore: r31
+// 0x00000070: addiu r29, r29, 64
+// 0x00000074: .cfi_def_cfa_offset: 0
+// 0x00000074: jr r31
+// 0x00000078: nop
+// 0x0000007c: .cfi_restore_state
+// 0x0000007c: .cfi_def_cfa_offset: 64
 
 static constexpr uint8_t expected_asm_kMips64[] = {
     0xA0, 0xFF, 0xBD, 0x67, 0x58, 0x00, 0xBF, 0xFF, 0x50, 0x00, 0xBE, 0xFF,
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 4e716b5..be2397f 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -23,6 +23,10 @@
 namespace art {
 namespace mips {
 
+static const Register kCoreArgumentRegisters[] = { A0, A1, A2, A3 };
+static const FRegister kFArgumentRegisters[] = { F12, F14 };
+static const DRegister kDArgumentRegisters[] = { D6, D7 };
+
 // Calling convention
 ManagedRegister MipsManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
   return MipsManagedRegister::FromCoreRegister(T9);
@@ -89,14 +93,49 @@
 const ManagedRegisterEntrySpills& MipsManagedRuntimeCallingConvention::EntrySpills() {
   // We spill the argument registers on MIPS to free them up for scratch use, we then assume
   // all arguments are on the stack.
-  if (entry_spills_.size() == 0) {
-    size_t num_spills = NumArgs() + NumLongOrDoubleArgs();
-    if (num_spills > 0) {
-      entry_spills_.push_back(MipsManagedRegister::FromCoreRegister(A1));
-      if (num_spills > 1) {
-        entry_spills_.push_back(MipsManagedRegister::FromCoreRegister(A2));
-        if (num_spills > 2) {
-          entry_spills_.push_back(MipsManagedRegister::FromCoreRegister(A3));
+  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
+    uint32_t gpr_index = 1;  // Skip A0, it is used for ArtMethod*.
+    uint32_t fpr_index = 0;
+
+    for (ResetIterator(FrameOffset(0)); HasNext(); Next()) {
+      if (IsCurrentParamAFloatOrDouble()) {
+        if (IsCurrentParamADouble()) {
+          if (fpr_index < arraysize(kDArgumentRegisters)) {
+            entry_spills_.push_back(
+                MipsManagedRegister::FromDRegister(kDArgumentRegisters[fpr_index++]));
+          } else {
+            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
+          }
+        } else {
+          if (fpr_index < arraysize(kFArgumentRegisters)) {
+            entry_spills_.push_back(
+                MipsManagedRegister::FromFRegister(kFArgumentRegisters[fpr_index++]));
+          } else {
+            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
+          }
+        }
+      } else {
+        if (IsCurrentParamALong() && !IsCurrentParamAReference()) {
+          if (gpr_index == 1) {
+            // Don't use a1-a2 as a register pair; move to a2-a3 instead.
+            gpr_index++;
+          }
+          if (gpr_index < arraysize(kCoreArgumentRegisters) - 1) {
+            entry_spills_.push_back(
+                MipsManagedRegister::FromCoreRegister(kCoreArgumentRegisters[gpr_index++]));
+          } else if (gpr_index == arraysize(kCoreArgumentRegisters) - 1) {
+            gpr_index++;
+            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
+          } else {
+            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
+          }
+        }
+
+        if (gpr_index < arraysize(kCoreArgumentRegisters)) {
+          entry_spills_.push_back(
+            MipsManagedRegister::FromCoreRegister(kCoreArgumentRegisters[gpr_index++]));
+        } else {
+          entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
         }
       }
     }
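
A toy walk-through (invented names, not the ART API) of the EntrySpills() logic above for shorty "VJFDD" (void return; args long, float, double, double): doubles take D6/D7, floats F12/F14, a long skips A1 and takes the A2/A3 pair, and an exhausted register class records a NoRegister placeholder of the argument's size so the stack offsets stay in step.

// Standalone sketch of the spill-list construction; all names are invented.
#include <cstdio>

int main() {
  const char* shorty = "VJFDD";
  unsigned gpr = 1;  // A0 is reserved for the ArtMethod*
  unsigned fpr = 0;  // indexes the pairs F12/F13 (D6) and F14/F15 (D7)
  for (const char* p = shorty + 1; *p != '\0'; ++p) {
    if (*p == 'D' || *p == 'F') {
      if (fpr < 2) {
        if (*p == 'D') printf("spill D%u\n", 6 + fpr);
        else printf("spill F%u\n", 12 + 2 * fpr);
        fpr++;
      } else {
        printf("no-reg placeholder (%d bytes)\n", *p == 'D' ? 8 : 4);
      }
    } else if (*p == 'J') {
      if (gpr == 1) gpr = 2;  // don't pair A1/A2; move to A2/A3
      if (gpr + 1 < 4) {
        printf("spill A%u\nspill A%u\n", gpr, gpr + 1);
        gpr += 2;
      } else {
        gpr = 4;
        printf("no-reg placeholder (4 bytes)\nno-reg placeholder (4 bytes)\n");
      }
    } else {  // int, reference, etc.
      if (gpr < 4) printf("spill A%u\n", gpr++);
      else printf("no-reg placeholder (4 bytes)\n");
    }
  }
}
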
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 6568ea4..503187b 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -887,7 +887,7 @@
         } else {
           stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
           if (current->GetType() == Primitive::kPrimLong) {
-            stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
+            stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id);
             ++i;
             DCHECK_LT(i, environment_size);
           }
@@ -909,7 +909,8 @@
         } else {
           stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
           if (current->GetType() == Primitive::kPrimDouble) {
-            stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
+            stack_map_stream_.AddDexRegisterEntry(
+                DexRegisterLocation::Kind::kInFpuRegisterHigh, id);
             ++i;
             DCHECK_LT(i, environment_size);
           }
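
A simplified picture (invented types, not the ART classes) of what this hunk changes in the emitted stack maps: the two halves of a 64-bit value held in registers are now recorded with distinct kinds, so the high half is no longer mislabeled as a second low half.

// Illustrative only; Kind and Entry are stand-ins for the ART types.
#include <cstdio>

enum class Kind { kInRegister, kInRegisterHigh, kInFpuRegister, kInFpuRegisterHigh };

struct Entry { Kind kind; int reg; };

int main() {
  // A Java long whose halves sit in machine registers 6 and 8 (as in the
  // stack_map_test change further down):
  Entry old_halves[2] = {{Kind::kInRegister, 6}, {Kind::kInRegister, 8}};      // ambiguous
  Entry new_halves[2] = {{Kind::kInRegister, 6}, {Kind::kInRegisterHigh, 8}};  // explicit
  printf("high-half kind: %d -> %d\n",
         static_cast<int>(old_halves[1].kind), static_cast<int>(new_halves[1].kind));
}
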
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 33207d9..c4a3b28 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -143,6 +143,22 @@
   stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3);   // Short location.
   stream.EndStackMapEntry();
 
+  ArenaBitVector sp_mask3(&arena, 0, true);
+  sp_mask3.SetBit(1);
+  sp_mask3.SetBit(5);
+  stream.BeginStackMapEntry(2, 192, 0xAB, &sp_mask3, number_of_dex_registers, 0);
+  stream.AddDexRegisterEntry(Kind::kInRegister, 6);       // Short location.
+  stream.AddDexRegisterEntry(Kind::kInRegisterHigh, 8);   // Short location.
+  stream.EndStackMapEntry();
+
+  ArenaBitVector sp_mask4(&arena, 0, true);
+  sp_mask4.SetBit(6);
+  sp_mask4.SetBit(7);
+  stream.BeginStackMapEntry(3, 256, 0xCD, &sp_mask4, number_of_dex_registers, 0);
+  stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3);      // Short location, same in stack map 2.
+  stream.AddDexRegisterEntry(Kind::kInFpuRegisterHigh, 1);  // Short location.
+  stream.EndStackMapEntry();
+
   size_t size = stream.PrepareForFillIn();
   void* memory = arena.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
@@ -151,15 +167,15 @@
   CodeInfo code_info(region);
   StackMapEncoding encoding = code_info.ExtractEncoding();
   ASSERT_EQ(2u, encoding.NumberOfBytesForStackMask());
-  ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
+  ASSERT_EQ(4u, code_info.GetNumberOfStackMaps());
 
   uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
-  ASSERT_EQ(4u, number_of_location_catalog_entries);
+  ASSERT_EQ(7u, number_of_location_catalog_entries);
   DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
   // The Dex register location catalog contains:
-  // - three 1-byte short Dex register locations, and
+  // - six 1-byte short Dex register locations, and
   // - one 5-byte large Dex register location.
-  size_t expected_location_catalog_size = 3u * 1u + 5u;
+  size_t expected_location_catalog_size = 6u * 1u + 5u;
   ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
 
   // First stack map.
@@ -278,6 +294,116 @@
 
     ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
   }
+
+  // Third stack map.
+  {
+    StackMap stack_map = code_info.GetStackMapAt(2, encoding);
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(2u, encoding)));
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u, encoding)));
+    ASSERT_EQ(2u, stack_map.GetDexPc(encoding));
+    ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding));
+    ASSERT_EQ(0xABu, stack_map.GetRegisterMask(encoding));
+
+    MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
+    ASSERT_TRUE(SameBits(stack_mask, sp_mask3));
+
+    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding));
+    DexRegisterMap dex_register_map =
+        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+    ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
+    ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+    ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
+    // The Dex register map contains:
+    // - one 1-byte live bit mask, and
+    // - one 1-byte set of location catalog entry indices composed of two 3-bit values.
+    size_t expected_dex_register_map_size = 1u + 1u;
+    ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
+
+    ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind(
+                  0, number_of_dex_registers, code_info, encoding));
+    ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map.GetLocationKind(
+                  1, number_of_dex_registers, code_info, encoding));
+    ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationInternalKind(
+                  0, number_of_dex_registers, code_info, encoding));
+    ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map.GetLocationInternalKind(
+                  1, number_of_dex_registers, code_info, encoding));
+    ASSERT_EQ(6, dex_register_map.GetMachineRegister(
+                  0, number_of_dex_registers, code_info, encoding));
+    ASSERT_EQ(8, dex_register_map.GetMachineRegister(
+                  1, number_of_dex_registers, code_info, encoding));
+
+    size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
+        0, number_of_dex_registers, number_of_location_catalog_entries);
+    size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
+        1, number_of_dex_registers, number_of_location_catalog_entries);
+    ASSERT_EQ(4u, index0);
+    ASSERT_EQ(5u, index1);
+    DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
+    DexRegisterLocation location1 = location_catalog.GetDexRegisterLocation(index1);
+    ASSERT_EQ(Kind::kInRegister, location0.GetKind());
+    ASSERT_EQ(Kind::kInRegisterHigh, location1.GetKind());
+    ASSERT_EQ(Kind::kInRegister, location0.GetInternalKind());
+    ASSERT_EQ(Kind::kInRegisterHigh, location1.GetInternalKind());
+    ASSERT_EQ(6, location0.GetValue());
+    ASSERT_EQ(8, location1.GetValue());
+
+    ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
+  }
+
+  // Fourth stack map.
+  {
+    StackMap stack_map = code_info.GetStackMapAt(3, encoding);
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(3u, encoding)));
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u, encoding)));
+    ASSERT_EQ(3u, stack_map.GetDexPc(encoding));
+    ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding));
+    ASSERT_EQ(0xCDu, stack_map.GetRegisterMask(encoding));
+
+    MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
+    ASSERT_TRUE(SameBits(stack_mask, sp_mask4));
+
+    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding));
+    DexRegisterMap dex_register_map =
+        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+    ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
+    ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+    ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
+    // The Dex register map contains:
+    // - one 1-byte live bit mask, and
+    // - one 1-byte set of location catalog entry indices composed of two 3-bit values.
+    size_t expected_dex_register_map_size = 1u + 1u;
+    ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
+
+    ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind(
+                  0, number_of_dex_registers, code_info, encoding));
+    ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map.GetLocationKind(
+                  1, number_of_dex_registers, code_info, encoding));
+    ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationInternalKind(
+                  0, number_of_dex_registers, code_info, encoding));
+    ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map.GetLocationInternalKind(
+                  1, number_of_dex_registers, code_info, encoding));
+    ASSERT_EQ(3, dex_register_map.GetMachineRegister(
+                  0, number_of_dex_registers, code_info, encoding));
+    ASSERT_EQ(1, dex_register_map.GetMachineRegister(
+                  1, number_of_dex_registers, code_info, encoding));
+
+    size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
+        0, number_of_dex_registers, number_of_location_catalog_entries);
+    size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
+        1, number_of_dex_registers, number_of_location_catalog_entries);
+    ASSERT_EQ(3u, index0);  // Shared with second stack map.
+    ASSERT_EQ(6u, index1);
+    DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
+    DexRegisterLocation location1 = location_catalog.GetDexRegisterLocation(index1);
+    ASSERT_EQ(Kind::kInFpuRegister, location0.GetKind());
+    ASSERT_EQ(Kind::kInFpuRegisterHigh, location1.GetKind());
+    ASSERT_EQ(Kind::kInFpuRegister, location0.GetInternalKind());
+    ASSERT_EQ(Kind::kInFpuRegisterHigh, location1.GetInternalKind());
+    ASSERT_EQ(3, location0.GetValue());
+    ASSERT_EQ(1, location1.GetValue());
+
+    ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
+  }
 }
 
 TEST(StackMapTest, TestNonLiveDexRegisters) {
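
Why the catalog above grows from 4 to 7 entries rather than 8: the location catalog interns each distinct (kind, value) location once, and the fourth stack map reuses the (kInFpuRegister, 3) entry the second one created. A toy version of that interning (invented names, not the ART encoding):

// Demo of location deduplication; enum values are for the demo only.
#include <cstdio>
#include <map>
#include <utility>

int main() {
  std::map<std::pair<int, int>, int> catalog;  // (kind, value) -> catalog index
  auto intern = [&catalog](int kind, int value) {
    return catalog.emplace(std::make_pair(kind, value),
                           static_cast<int>(catalog.size())).first->second;
  };
  enum { kInFpuRegister = 1, kInFpuRegisterHigh = 2 };
  printf("%d\n", intern(kInFpuRegister, 3));      // first use -> new index
  printf("%d\n", intern(kInFpuRegister, 3));      // reused -> same index
  printf("%d\n", intern(kInFpuRegisterHigh, 1));  // distinct kind -> new index
}
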
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index c09dfcc..c5fae92 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -359,23 +359,19 @@
 }
 
 void MipsAssembler::AddD(DRegister fd, DRegister fs, DRegister ft) {
-  EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
-         static_cast<FRegister>(fd), 0x0);
+  EmitFR(0x11, 0x11, ConvertDRegToFReg(ft), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x0);
 }
 
 void MipsAssembler::SubD(DRegister fd, DRegister fs, DRegister ft) {
-  EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
-         static_cast<FRegister>(fd), 0x1);
+  EmitFR(0x11, 0x11, ConvertDRegToFReg(ft), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x1);
 }
 
 void MipsAssembler::MulD(DRegister fd, DRegister fs, DRegister ft) {
-  EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
-         static_cast<FRegister>(fd), 0x2);
+  EmitFR(0x11, 0x11, ConvertDRegToFReg(ft), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x2);
 }
 
 void MipsAssembler::DivD(DRegister fd, DRegister fs, DRegister ft) {
-  EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
-         static_cast<FRegister>(fd), 0x3);
+  EmitFR(0x11, 0x11, ConvertDRegToFReg(ft), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x3);
 }
 
 void MipsAssembler::MovS(FRegister fd, FRegister fs) {
@@ -383,32 +379,31 @@
 }
 
 void MipsAssembler::MovD(DRegister fd, DRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FRegister>(0), static_cast<FRegister>(fs),
-         static_cast<FRegister>(fd), 0x6);
+  EmitFR(0x11, 0x11, static_cast<FRegister>(0), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x6);
 }
 
 void MipsAssembler::Mfc1(Register rt, FRegister fs) {
-  EmitFR(0x11, 0x00, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
+  EmitFR(0x11, 0x00, ConvertRegToFReg(rt), fs, static_cast<FRegister>(0), 0x0);
 }
 
 void MipsAssembler::Mtc1(FRegister ft, Register rs) {
-  EmitFR(0x11, 0x04, ft, static_cast<FRegister>(rs), static_cast<FRegister>(0), 0x0);
+  EmitFR(0x11, 0x04, ft, ConvertRegToFReg(rs), static_cast<FRegister>(0), 0x0);
 }
 
 void MipsAssembler::Lwc1(FRegister ft, Register rs, uint16_t imm16) {
-  EmitI(0x31, rs, static_cast<Register>(ft), imm16);
+  EmitI(0x31, rs, ConvertFRegToReg(ft), imm16);
 }
 
 void MipsAssembler::Ldc1(DRegister ft, Register rs, uint16_t imm16) {
-  EmitI(0x35, rs, static_cast<Register>(ft), imm16);
+  EmitI(0x35, rs, ConvertDRegToReg(ft), imm16);
 }
 
 void MipsAssembler::Swc1(FRegister ft, Register rs, uint16_t imm16) {
-  EmitI(0x39, rs, static_cast<Register>(ft), imm16);
+  EmitI(0x39, rs, ConvertFRegToReg(ft), imm16);
 }
 
 void MipsAssembler::Sdc1(DRegister ft, Register rs, uint16_t imm16) {
-  EmitI(0x3d, rs, static_cast<Register>(ft), imm16);
+  EmitI(0x3d, rs, ConvertDRegToReg(ft), imm16);
 }
 
 void MipsAssembler::Break() {
@@ -529,7 +524,7 @@
   }
 }
 
-void MipsAssembler::StoreFToOffset(FRegister reg, Register base, int32_t offset) {
+void MipsAssembler::StoreSToOffset(FRegister reg, Register base, int32_t offset) {
   Swc1(reg, base, offset);
 }
 
@@ -566,9 +561,22 @@
   StoreToOffset(kStoreWord, method_reg.AsMips().AsCoreRegister(), SP, 0);
 
   // Write out entry spills.
+  int32_t offset = frame_size + kFramePointerSize;
   for (size_t i = 0; i < entry_spills.size(); ++i) {
-    Register reg = entry_spills.at(i).AsMips().AsCoreRegister();
-    StoreToOffset(kStoreWord, reg, SP, frame_size + kFramePointerSize + (i * kFramePointerSize));
+    MipsManagedRegister reg = entry_spills.at(i).AsMips();
+    if (reg.IsNoRegister()) {
+      ManagedRegisterSpill spill = entry_spills.at(i);
+      offset += spill.getSize();
+    } else if (reg.IsCoreRegister()) {
+      StoreToOffset(kStoreWord, reg.AsCoreRegister(), SP, offset);
+      offset += 4;
+    } else if (reg.IsFRegister()) {
+      StoreSToOffset(reg.AsFRegister(), SP, offset);
+      offset += 4;
+    } else if (reg.IsDRegister()) {
+      StoreDToOffset(reg.AsDRegister(), SP, offset);
+      offset += 8;
+    }
   }
 }
 
@@ -624,7 +632,7 @@
     StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
                   SP, dest.Int32Value() + 4);
   } else if (src.IsFRegister()) {
-    StoreFToOffset(src.AsFRegister(), SP, dest.Int32Value());
+    StoreSToOffset(src.AsFRegister(), SP, dest.Int32Value());
   } else {
     CHECK(src.IsDRegister());
     StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
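
A condensed model (invented minimal types) of the new spill loop in BuildFrame above: the offset now advances by each entry's own size, and a NoRegister placeholder advances it without emitting a store, which is what keeps core args, FP args, and skipped slots packed at the right offsets.

// Sketch of the offset bookkeeping; Spill is a stand-in type.
#include <cstdio>

struct Spill { const char* reg; int size; };  // reg == nullptr models NoRegister

int main() {
  const int frame_size = 80;
  const int kFramePointerSize = 4;
  const Spill spills[] = {{"A1", 4}, {"D6", 8}, {"F14", 4}, {nullptr, 4}};
  int offset = frame_size + kFramePointerSize;  // as at the top of the loop
  for (const Spill& s : spills) {
    if (s.reg != nullptr) {
      printf("store %s at SP+%d\n", s.reg, offset);  // sw/swc1/sdc1 by kind
    }
    offset += s.size;  // placeholders advance the offset without a store
  }
  // store A1 at SP+84, D6 at SP+88, F14 at SP+96; the placeholder leaves a 4-byte gap
}
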
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index df95dad..6c8b162 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -141,7 +141,7 @@
   void LoadSFromOffset(FRegister reg, Register base, int32_t offset);
   void LoadDFromOffset(DRegister reg, Register base, int32_t offset);
   void StoreToOffset(StoreOperandType type, Register reg, Register base, int32_t offset);
-  void StoreFToOffset(FRegister reg, Register base, int32_t offset);
+  void StoreSToOffset(FRegister reg, Register base, int32_t offset);
   void StoreDToOffset(DRegister reg, Register base, int32_t offset);
 
   // Emit data (e.g. encoded instruction or immediate) to the instruction stream.
@@ -277,6 +277,19 @@
   int32_t EncodeBranchOffset(int offset, int32_t inst, bool is_jump);
   int DecodeBranchOffset(int32_t inst, bool is_jump);
 
+  FRegister ConvertDRegToFReg(DRegister reg) {
+    return static_cast<FRegister>(reg * 2);
+  }
+  Register ConvertDRegToReg(DRegister reg) {
+    return static_cast<Register>(reg * 2);
+  }
+  Register ConvertFRegToReg(FRegister reg) {
+    return static_cast<Register>(reg);
+  }
+  FRegister ConvertRegToFReg(Register reg) {
+    return static_cast<FRegister>(reg);
+  }
+
   DISALLOW_COPY_AND_ASSIGN(MipsAssembler);
 };
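
The conversion helpers above encode the MIPS o32 FPU overlay: double register Dn aliases the single-precision pair F(2n)/F(2n+1), so emitting a Dn in an instruction's FRegister field is just n * 2. A quick standalone check of that aliasing arithmetic:

// Prints the aliasing for the two FP argument doubles.
#include <cstdio>

int main() {
  for (int d = 6; d <= 7; ++d) {
    printf("D%d -> F%d/F%d\n", d, 2 * d, 2 * d + 1);  // D6 -> F12/F13, D7 -> F14/F15
  }
}
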
 
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
index 390c606..453056d 100644
--- a/runtime/arch/mips/asm_support_mips.h
+++ b/runtime/arch/mips/asm_support_mips.h
@@ -21,6 +21,6 @@
 
 #define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 96
 #define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 48
-#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 64
+#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 80
 
 #endif  // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index 53f2b65..bc2bf68 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -76,6 +76,11 @@
   gprs_[A1] = nullptr;
   gprs_[A2] = nullptr;
   gprs_[A3] = nullptr;
+
+  fprs_[F12] = nullptr;
+  fprs_[F13] = nullptr;
+  fprs_[F14] = nullptr;
+  fprs_[F15] = nullptr;
 }
 
 extern "C" NO_RETURN void art_quick_do_long_jump(uint32_t*, uint32_t*);
diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S
index fbc81d5..3558efd 100644
--- a/runtime/arch/mips/jni_entrypoints_mips.S
+++ b/runtime/arch/mips/jni_entrypoints_mips.S
@@ -24,17 +24,19 @@
      */
     .extern artFindNativeMethod
 ENTRY art_jni_dlsym_lookup_stub
-    addiu $sp, $sp, -32          # leave room for $a0, $a1, $a2, $a3, and $ra
-    .cfi_adjust_cfa_offset 32
-    sw     $ra, 16($sp)
-    .cfi_rel_offset 31, 16
-    sw     $a3, 12($sp)
+    addiu $sp, $sp, -48         # leave room for $f12, $f13, $f14, $f15, $a0, $a1, $a2, $a3, and $ra
+    .cfi_adjust_cfa_offset 48
+    sw    $ra, 32($sp)
+    .cfi_rel_offset 31, 32
+    SDu   $f14, $f15, 24, $sp, $t0
+    SDu   $f12, $f13, 16, $sp, $t0
+    sw    $a3, 12($sp)
     .cfi_rel_offset 7, 12
-    sw     $a2, 8($sp)
+    sw    $a2, 8($sp)
     .cfi_rel_offset 6, 8
-    sw     $a1, 4($sp)
+    sw    $a1, 4($sp)
     .cfi_rel_offset 5, 4
-    sw     $a0, 0($sp)
+    sw    $a0, 0($sp)
     .cfi_rel_offset 4, 0
     jal   artFindNativeMethod   # (Thread*)
     move  $a0, $s1              # pass Thread::Current()
@@ -42,10 +44,12 @@
     lw    $a1, 4($sp)
     lw    $a2, 8($sp)
     lw    $a3, 12($sp)
-    lw    $ra, 16($sp)
+    LDu   $f12, $f13, 16, $sp, $t0
+    LDu   $f14, $f15, 24, $sp, $t0
+    lw    $ra, 32($sp)
     beq   $v0, $zero, .Lno_native_code_found
-    addiu $sp, $sp, 32          # restore the stack
-    .cfi_adjust_cfa_offset -32
+    addiu $sp, $sp, 48          # restore the stack
+    .cfi_adjust_cfa_offset -48
     move  $t9, $v0              # put method code result in $t9
     jalr  $zero, $t9            # leaf call to method's code
     nop
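
A hypothetical C++ mirror (not an ART type) of the enlarged dlsym lookup frame: a0-a3 at offsets 0-12, the f12/f13 and f14/f15 pairs at the 8-byte-aligned offsets 16 and 24, ra at 32, and padding up to the 48 bytes the stub reserves.

// JniDlsymLookupFrame is invented; the offsets mirror the assembly above.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct JniDlsymLookupFrame {
  uint32_t a0, a1, a2, a3;  // offsets 0, 4, 8, 12
  uint64_t f12_f13;         // offset 16, stored with SDu
  uint64_t f14_f15;         // offset 24, stored with SDu
  uint32_t ra;              // offset 32
  uint32_t pad[3];          // keeps $sp 8-byte aligned: 48 bytes total
};

static_assert(offsetof(JniDlsymLookupFrame, f12_f13) == 16, "f12/f13 slot");
static_assert(offsetof(JniDlsymLookupFrame, ra) == 32, "ra slot");
static_assert(sizeof(JniDlsymLookupFrame) == 48, "matches addiu $sp, $sp, -48");

int main() { printf("frame size = %zu\n", sizeof(JniDlsymLookupFrame)); }
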
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 4d5004f..8bc75e5 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -170,45 +170,47 @@
      * callee-save: $a1-$a3, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method*
      */
 .macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
-    addiu  $sp, $sp, -64
-    .cfi_adjust_cfa_offset 64
+    addiu  $sp, $sp, -80
+    .cfi_adjust_cfa_offset 80
 
     // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 64)
+#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 80)
 #error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(MIPS) size not as expected."
 #endif
 
-    sw     $ra, 60($sp)
-    .cfi_rel_offset 31, 60
-    sw     $s8, 56($sp)
-    .cfi_rel_offset 30, 56
-    sw     $gp, 52($sp)
-    .cfi_rel_offset 28, 52
-    sw     $s7, 48($sp)
-    .cfi_rel_offset 23, 48
-    sw     $s6, 44($sp)
-    .cfi_rel_offset 22, 44
-    sw     $s5, 40($sp)
-    .cfi_rel_offset 21, 40
-    sw     $s4, 36($sp)
-    .cfi_rel_offset 20, 36
-    sw     $s3, 32($sp)
-    .cfi_rel_offset 19, 32
-    sw     $s2, 28($sp)
-    .cfi_rel_offset 18, 28
-    sw     $a3, 24($sp)
-    .cfi_rel_offset 7, 24
-    sw     $a2, 20($sp)
-    .cfi_rel_offset 6, 20
-    sw     $a1, 16($sp)
-    .cfi_rel_offset 5, 16
+    sw     $ra, 76($sp)
+    .cfi_rel_offset 31, 76
+    sw     $s8, 72($sp)
+    .cfi_rel_offset 30, 72
+    sw     $gp, 68($sp)
+    .cfi_rel_offset 28, 68
+    sw     $s7, 64($sp)
+    .cfi_rel_offset 23, 64
+    sw     $s6, 60($sp)
+    .cfi_rel_offset 22, 60
+    sw     $s5, 56($sp)
+    .cfi_rel_offset 21, 56
+    sw     $s4, 52($sp)
+    .cfi_rel_offset 20, 52
+    sw     $s3, 48($sp)
+    .cfi_rel_offset 19, 48
+    sw     $s2, 44($sp)
+    .cfi_rel_offset 18, 44
+    sw     $a3, 40($sp)
+    .cfi_rel_offset 7, 40
+    sw     $a2, 36($sp)
+    .cfi_rel_offset 6, 36
+    sw     $a1, 32($sp)
+    .cfi_rel_offset 5, 32
+    SDu $f14, $f15, 24, $sp, $t0
+    SDu $f12, $f13, 16, $sp, $t0
     # bottom will hold Method*
 .endm
 
     /*
      * Macro that sets up the callee save frame to conform with
      * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC.
-     * callee-save: $a1-$a3, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method*
+     * callee-save: $a1-$a3, $f12-$f15, $s2-$s8 + $gp + $ra, 16 total + 3 words padding + method*
      * Clobbers $t0 and $sp
      * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
      * Reserves FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack
@@ -227,7 +229,7 @@
     /*
      * Macro that sets up the callee save frame to conform with
      * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC.
-     * callee-save: $a1-$a3, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method*
+     * callee-save: $a1-$a3, $f12-$f15, $s2-$s8 + $gp + $ra, 16 total + 3 words padding + method*
      * Clobbers $sp
      * Use $a0 as the Method* and loads it into bottom of stack.
      * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
@@ -244,32 +246,34 @@
 .macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
     addiu  $sp, $sp, ARG_SLOT_SIZE                # remove argument slots on the stack
     .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
-    lw     $ra, 60($sp)
+    lw     $ra, 76($sp)
     .cfi_restore 31
-    lw     $s8, 56($sp)
+    lw     $s8, 72($sp)
     .cfi_restore 30
-    lw     $gp, 52($sp)
+    lw     $gp, 68($sp)
     .cfi_restore 28
-    lw     $s7, 48($sp)
+    lw     $s7, 64($sp)
     .cfi_restore 23
-    lw     $s6, 44($sp)
+    lw     $s6, 60($sp)
     .cfi_restore 22
-    lw     $s5, 40($sp)
+    lw     $s5, 56($sp)
     .cfi_restore 21
-    lw     $s4, 36($sp)
+    lw     $s4, 52($sp)
     .cfi_restore 20
-    lw     $s3, 32($sp)
+    lw     $s3, 48($sp)
     .cfi_restore 19
-    lw     $s2, 28($sp)
+    lw     $s2, 44($sp)
     .cfi_restore 18
-    lw     $a3, 24($sp)
+    lw     $a3, 40($sp)
     .cfi_restore 7
-    lw     $a2, 20($sp)
+    lw     $a2, 36($sp)
     .cfi_restore 6
-    lw     $a1, 16($sp)
+    lw     $a1, 32($sp)
     .cfi_restore 5
-    addiu  $sp, $sp, 64           # pop frame
-    .cfi_adjust_cfa_offset -64
+    LDu $f14, $f15, 24, $sp, $t1
+    LDu $f12, $f13, 16, $sp, $t1
+    addiu  $sp, $sp, 80           # pop frame
+    .cfi_adjust_cfa_offset -80
 .endm
 
     /*
@@ -484,6 +488,32 @@
 INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
 INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
 
+.macro LOAD_WORD_TO_REG reg, next_arg, index, label
+    lw    $\reg, -4($\next_arg)   # next_arg points to argument after the current one (offset is 4)
+    b     \label
+    addiu $\index, 1
+.endm
+
+.macro LOAD_LONG_TO_REG reg1, reg2, next_arg, index, label
+    lw    $\reg1, -8($\next_arg)  # next_arg points to argument after the current one (offset is 8)
+    lw    $\reg2, -4($\next_arg)
+    b     \label
+    li    $\index, 4              # long can be loaded only to a2_a3 pair so index will always be 4
+.endm
+
+.macro LOAD_FLOAT_TO_REG reg, next_arg, index, label
+    lwc1  $\reg, -4($\next_arg)   # next_arg points to argument after the current one (offset is 4)
+    b     \label
+    addiu $\index, 1
+.endm
+
+.macro LOAD_DOUBLE_TO_REG reg1, reg2, next_arg, index, tmp, label
+    LDu  $\reg1, $\reg2, -8, $\next_arg, $\tmp  # next_arg points to argument after the current one
+                                                # (offset is 8)
+    b     \label
+    addiu $\index, 1
+.endm
+
     /*
      * Invocation stub for quick code.
      * On entry:
@@ -510,21 +540,76 @@
     .cfi_def_cfa_register 30
     move  $s1, $a3              # move managed thread pointer into s1
     addiu $s0, $zero, SUSPEND_CHECK_INTERVAL  # reset s0 to suspend check interval
-    addiu $t0, $a2, 4           # create space for method pointer in frame.
+    addiu $t0, $a2, 4           # create space for ArtMethod* in frame.
     subu  $t0, $sp, $t0         # reserve & align *stack* to 16 bytes:
-    srl   $t0, $t0, 4           # native calling convention only aligns to 8B,
-    sll   $sp, $t0, 4           # so we have to ensure ART 16B alignment ourselves.
-    addiu $a0, $sp, 4           # pass stack pointer + method ptr as dest for memcpy
+    srl   $t0, $t0, 4           #   native calling convention only aligns to 8B,
+    sll   $sp, $t0, 4           #   so we have to ensure ART 16B alignment ourselves.
+    addiu $a0, $sp, 4           # pass stack pointer + ArtMethod* as dest for memcpy
     jal   memcpy                # (dest, src, bytes)
     addiu $sp, $sp, -16         # make space for argument slots for memcpy
     addiu $sp, $sp, 16          # restore stack after memcpy
-    lw    $a0, 16($fp)          # restore method*
-    lw    $a1, 4($sp)           # copy arg value for a1
-    lw    $a2, 8($sp)           # copy arg value for a2
-    lw    $a3, 12($sp)          # copy arg value for a3
+    lw    $a0, 16($fp)          # restore ArtMethod*
+    lw    $a1, 4($sp)           # a1 = this*
+    addiu $t0, $sp, 8           # t0 = pointer to the current argument (skip ArtMethod* and this*)
+    li    $t3, 2                # t3 = gpr_index = 2 (skip A0 and A1)
+    move  $t4, $zero            # t4 = fp_index = 0
+    lw    $t1, 20+16($fp)       # get shorty (20 is offset from the $sp on entry + 16 as the $fp is
+                                # 16 bytes below the $sp on entry)
+    addiu $t1, 1                # t1 = shorty + 1 (skip 1 for return type)
+loop:
+    lbu   $t2, 0($t1)           # t2 = shorty[i]
+    beqz  $t2, loopEnd          # finish getting args when shorty[i] == '\0'
+    addiu $t1, 1
+
+    li    $t9, 'J'              # put char 'J' into t9
+    beq   $t9, $t2, isLong      # branch if arg type char == 'J'
+    li    $t9, 'D'              # put char 'D' into t9
+    beq   $t9, $t2, isDouble    # branch if arg type char == 'D'
+    li    $t9, 'F'              # put char 'F' into t9
+    beq   $t9, $t2, isSingle    # branch if arg type char == 'F'
+    addiu $t0, 4                # next_arg = curr_arg + 4 (in branch delay slot,
+                                # for both int and single)
+
+    li    $t5, 2                                   # skip a0 and a1 (ArtMethod* and this*)
+    bne   $t5, $t3, 1f                             # if (gpr_index == 2)
+    addiu $t5, 1
+    LOAD_WORD_TO_REG a2, t0, t3, loop              #   a2 = current argument, gpr_index++
+1:  bne   $t5, $t3, loop                           # else if (gpr_index == 3)
+    nop
+    LOAD_WORD_TO_REG a3, t0, t3, loop              #   a3 = current argument, gpr_index++
+
+isLong:
+    addiu $t0, 8                                   # next_arg = curr_arg + 8
+    slti  $t5, $t3, 3
+    beqz  $t5, 2f                                  # if (gpr_index < 3)
+    nop
+    LOAD_LONG_TO_REG a2, a3, t0, t3, loop          #   a2_a3 = curr_arg, gpr_index = 4
+2:  b     loop                                     # else
+    li    $t3, 4                                   #   gpr_index = 4
+
+isDouble:
+    addiu $t0, 8                                   # next_arg = curr_arg + 8
+    li    $t5, 0
+    bne   $t5, $t4, 3f                             # if (fp_index == 0)
+    addiu $t5, 1
+    LOAD_DOUBLE_TO_REG f12, f13, t0, t4, t9, loop  #   f12_f13 = curr_arg, fp_index++
+3:  bne   $t5, $t4, loop                           # else if (fp_index == 1)
+    nop
+    LOAD_DOUBLE_TO_REG f14, f15, t0, t4, t9, loop  #   f14_f15 = curr_arg, fp_index++
+
+isSingle:
+    li    $t5, 0
+    bne   $t5, $t4, 4f                             # if (fp_index == 0)
+    addiu $t5, 1
+    LOAD_FLOAT_TO_REG f12, t0, t4, loop            #   f12 = curr_arg, fp_index++
+4:  bne   $t5, $t4, loop                           # else if (fp_index == 1)
+    nop
+    LOAD_FLOAT_TO_REG f14, t0, t4, loop            #   f14 = curr_arg, fp_index++
+
+loopEnd:
     lw    $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0)  # get pointer to the code
     jalr  $t9                   # call the method
-    sw    $zero, 0($sp)         # store null for method* at bottom of frame
+    sw    $zero, 0($sp)         # store null for ArtMethod* at bottom of frame
     move  $sp, $fp              # restore the stack
     lw    $s0, 0($sp)
     .cfi_restore 16
@@ -539,20 +624,145 @@
     lw    $t0, 16($sp)          # get result pointer
     lw    $t1, 20($sp)          # get shorty
     lb    $t1, 0($t1)           # get result type char
-    li    $t2, 68               # put char 'D' into t2
-    beq   $t1, $t2, 1f          # branch if result type char == 'D'
-    li    $t3, 70               # put char 'F' into t3
-    beq   $t1, $t3, 1f          # branch if result type char == 'F'
+    li    $t2, 'D'              # put char 'D' into t2
+    beq   $t1, $t2, 5f          # branch if result type char == 'D'
+    li    $t3, 'F'              # put char 'F' into t3
+    beq   $t1, $t3, 5f          # branch if result type char == 'F'
     sw    $v0, 0($t0)           # store the result
     jalr  $zero, $ra
     sw    $v1, 4($t0)           # store the other half of the result
-1:
+5:
     SDu   $f0, $f1, 0, $t0, $t1 # store floating point result
     jalr  $zero, $ra
     nop
 END art_quick_invoke_stub
 
     /*
+     * Invocation stub for quick code of static methods.
+     * On entry:
+     *   a0 = method pointer
+     *   a1 = argument array or null for no argument methods
+     *   a2 = size of argument array in bytes
+     *   a3 = (managed) thread pointer
+     *   [sp + 16] = JValue* result
+     *   [sp + 20] = shorty
+     */
+ENTRY art_quick_invoke_static_stub
+    sw    $a0, 0($sp)           # save out a0
+    addiu $sp, $sp, -16         # spill s0, s1, fp, ra
+    .cfi_adjust_cfa_offset 16
+    sw    $ra, 12($sp)
+    .cfi_rel_offset 31, 12
+    sw    $fp, 8($sp)
+    .cfi_rel_offset 30, 8
+    sw    $s1, 4($sp)
+    .cfi_rel_offset 17, 4
+    sw    $s0, 0($sp)
+    .cfi_rel_offset 16, 0
+    move  $fp, $sp              # save sp in fp
+    .cfi_def_cfa_register 30
+    move  $s1, $a3              # move managed thread pointer into s1
+    addiu $s0, $zero, SUSPEND_CHECK_INTERVAL  # reset s0 to suspend check interval
+    addiu $t0, $a2, 4           # create space for ArtMethod* in frame.
+    subu  $t0, $sp, $t0         # reserve & align *stack* to 16 bytes:
+    srl   $t0, $t0, 4           #   native calling convention only aligns to 8B,
+    sll   $sp, $t0, 4           #   so we have to ensure ART 16B alignment ourselves.
+    addiu $a0, $sp, 4           # pass stack pointer + ArtMethod* as dest for memcpy
+    jal   memcpy                # (dest, src, bytes)
+    addiu $sp, $sp, -16         # make space for argument slots for memcpy
+    addiu $sp, $sp, 16          # restore stack after memcpy
+    lw    $a0, 16($fp)          # restore ArtMethod*
+    addiu $t0, $sp, 4           # t0 = pointer to the current argument (skip ArtMethod*)
+    li    $t3, 1                # t3 = gpr_index = 1 (skip A0)
+    move  $t4, $zero            # t4 = fp_index = 0
+    lw    $t1, 20+16($fp)       # get shorty (20 is offset from the $sp on entry + 16 as the $fp is
+                                # 16 bytes below the $sp on entry)
+    addiu $t1, 1                # t1 = shorty + 1 (skip 1 for return type)
+loopS:
+    lbu   $t2, 0($t1)           # t2 = shorty[i]
+    beqz  $t2, loopEndS         # finish getting args when shorty[i] == '\0'
+    addiu $t1, 1
+
+    li    $t9, 'J'              # put char 'J' into t9
+    beq   $t9, $t2, isLongS     # branch if arg type char == 'J'
+    li    $t9, 'D'              # put char 'D' into t9
+    beq   $t9, $t2, isDoubleS   # branch if arg type char == 'D'
+    li    $t9, 'F'              # put char 'F' into t9
+    beq   $t9, $t2, isSingleS   # branch if arg type char == 'F'
+    addiu $t0, 4                # next_arg = curr_arg + 4 (in branch delay slot,
+                                # for both int and single)
+
+    li    $t5, 1                                    # skip a0 (ArtMethod*)
+    bne   $t5, $t3, 1f                              # if (gpr_index == 1)
+    addiu $t5, 1
+    LOAD_WORD_TO_REG a1, t0, t3, loopS              #   a1 = current argument, gpr_index++
+1:  bne   $t5, $t3, 2f                              # else if (gpr_index == 2)
+    addiu $t5, 1
+    LOAD_WORD_TO_REG a2, t0, t3, loopS              #   a2 = current argument, gpr_index++
+2:  bne   $t5, $t3, loopS                           # else if (gpr_index == 3)
+    nop
+    LOAD_WORD_TO_REG a3, t0, t3, loopS              #   a3 = current argument, gpr_index++
+
+isLongS:
+    addiu $t0, 8                                    # next_arg = curr_arg + 8
+    slti  $t5, $t3, 3
+    beqz  $t5, 3f                                   # if (gpr_index < 3)
+    nop
+    LOAD_LONG_TO_REG a2, a3, t0, t3, loopS          #   a2_a3 = curr_arg, gpr_index = 4
+3:  b     loopS                                     # else
+    li    $t3, 4                                    #   gpr_index = 4
+
+isDoubleS:
+    addiu $t0, 8                                    # next_arg = curr_arg + 8
+    li    $t5, 0
+    bne   $t5, $t4, 4f                              # if (fp_index == 0)
+    addiu $t5, 1
+    LOAD_DOUBLE_TO_REG f12, f13, t0, t4, t9, loopS  #   f12_f13 = curr_arg, fp_index++
+4:  bne   $t5, $t4, loopS                           # else if (fp_index == 1)
+    nop
+    LOAD_DOUBLE_TO_REG f14, f15, t0, t4, t9, loopS  #   f14_f15 = curr_arg, fp_index++
+
+isSingleS:
+    li    $t5, 0
+    bne   $t5, $t4, 5f                              # if (fp_index == 0)
+    addiu $t5, 1
+    LOAD_FLOAT_TO_REG f12, t0, t4, loopS            #   f12 = curr_arg, fp_index++
+5:  bne   $t5, $t4, loopS                           # else if (fp_index == 1)
+    nop
+    LOAD_FLOAT_TO_REG f14, t0, t4, loopS            #   f14 = curr_arg, fp_index++
+
+loopEndS:
+    lw    $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0)  # get pointer to the code
+    jalr  $t9                   # call the method
+    sw    $zero, 0($sp)         # store null for ArtMethod* at bottom of frame
+    move  $sp, $fp              # restore the stack
+    lw    $s0, 0($sp)
+    .cfi_restore 16
+    lw    $s1, 4($sp)
+    .cfi_restore 17
+    lw    $fp, 8($sp)
+    .cfi_restore 30
+    lw    $ra, 12($sp)
+    .cfi_restore 31
+    addiu $sp, $sp, 16
+    .cfi_adjust_cfa_offset -16
+    lw    $t0, 16($sp)          # get result pointer
+    lw    $t1, 20($sp)          # get shorty
+    lb    $t1, 0($t1)           # get result type char
+    li    $t2, 'D'              # put char 'D' into t2
+    beq   $t1, $t2, 6f          # branch if result type char == 'D'
+    li    $t3, 'F'              # put char 'F' into t3
+    beq   $t1, $t3, 6f          # branch if result type char == 'F'
+    sw    $v0, 0($t0)           # store the result
+    jalr  $zero, $ra
+    sw    $v1, 4($t0)           # store the other half of the result
+6:
+    SDu   $f0, $f1, 0, $t0, $t1 # store floating point result
+    jalr  $zero, $ra
+    nop
+END art_quick_invoke_static_stub
+
+    /*
      * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
      * failure.
      */
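
The instance and static stubs above share one loop shape; a condensed C++ rendering (illustrative only, invented function name) of the shorty-driven assignment, showing the only real differences: the starting GPR index (the instance stub also skips A1, which holds this) and hence which A-registers word-sized args can land in. t3/t4 in the assembly are gpr/fpr here.

// "stack" means the argument stays in the memcpy'd stack copy.
#include <cstdio>

static void AssignArgs(const char* shorty, bool is_static) {
  int gpr = is_static ? 1 : 2;  // skip A0 (ArtMethod*), and A1 (this) if instance
  int fpr = 0;                  // 0 -> F12/D6, 1 -> F14/D7
  for (const char* p = shorty + 1; *p != '\0'; ++p) {
    switch (*p) {
      case 'F':
        if (fpr < 2) printf("F%d ", 12 + 2 * fpr++); else printf("stack ");
        break;
      case 'D':
        if (fpr < 2) printf("D%d ", 6 + fpr++); else printf("stack ");
        break;
      case 'J':
        // A long can only use the A2/A3 pair, so gpr always ends up at 4.
        if (gpr < 3) printf("A2/A3 "); else printf("stack ");
        gpr = 4;
        break;
      default:
        if (gpr < 4) printf("A%d ", gpr++); else printf("stack ");
    }
  }
  printf("\n");
}

int main() {
  AssignArgs("VIJF", /*is_static=*/true);   // A1 A2/A3 F12
  AssignArgs("VIJF", /*is_static=*/false);  // A2 stack F12 (A3 alone can't hold a long)
}
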
diff --git a/runtime/arch/mips/quick_method_frame_info_mips.h b/runtime/arch/mips/quick_method_frame_info_mips.h
index dd5ac80..f5d13c2 100644
--- a/runtime/arch/mips/quick_method_frame_info_mips.h
+++ b/runtime/arch/mips/quick_method_frame_info_mips.h
@@ -25,6 +25,8 @@
 namespace art {
 namespace mips {
 
+static constexpr uint32_t kMipsCalleeSaveAlwaysSpills =
+    (1 << art::mips::RA);
 static constexpr uint32_t kMipsCalleeSaveRefSpills =
     (1 << art::mips::S2) | (1 << art::mips::S3) | (1 << art::mips::S4) | (1 << art::mips::S5) |
     (1 << art::mips::S6) | (1 << art::mips::S7) | (1 << art::mips::GP) | (1 << art::mips::FP);
@@ -32,19 +34,26 @@
     (1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3);
 static constexpr uint32_t kMipsCalleeSaveAllSpills =
     (1 << art::mips::S0) | (1 << art::mips::S1);
+
+static constexpr uint32_t kMipsCalleeSaveFpAlwaysSpills = 0;
+static constexpr uint32_t kMipsCalleeSaveFpRefSpills = 0;
+static constexpr uint32_t kMipsCalleeSaveFpArgSpills =
+    (1 << art::mips::F12) | (1 << art::mips::F13) | (1 << art::mips::F14) | (1 << art::mips::F15);
 static constexpr uint32_t kMipsCalleeSaveAllFPSpills =
     (1 << art::mips::F20) | (1 << art::mips::F21) | (1 << art::mips::F22) | (1 << art::mips::F23) |
     (1 << art::mips::F24) | (1 << art::mips::F25) | (1 << art::mips::F26) | (1 << art::mips::F27) |
     (1 << art::mips::F28) | (1 << art::mips::F29) | (1 << art::mips::F30) | (1 << art::mips::F31);
 
 constexpr uint32_t MipsCalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
-  return kMipsCalleeSaveRefSpills |
+  return kMipsCalleeSaveAlwaysSpills | kMipsCalleeSaveRefSpills |
       (type == Runtime::kRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
-      (type == Runtime::kSaveAll ? kMipsCalleeSaveAllSpills : 0) | (1 << art::mips::RA);
+      (type == Runtime::kSaveAll ? kMipsCalleeSaveAllSpills : 0);
 }
 
 constexpr uint32_t MipsCalleeSaveFPSpills(Runtime::CalleeSaveType type) {
-  return type == Runtime::kSaveAll ? kMipsCalleeSaveAllFPSpills : 0;
+  return kMipsCalleeSaveFpAlwaysSpills | kMipsCalleeSaveFpRefSpills |
+      (type == Runtime::kRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
+      (type == Runtime::kSaveAll ? kMipsCalleeSaveAllFPSpills : 0);
 }
 
 constexpr uint32_t MipsCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
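
A sanity check of the new 80-byte kRefsAndArgs frame implied by the masks above: 12 core registers (RA, the 8 ref spills, A1-A3) plus 4 FP registers (F12-F15) plus the ArtMethod* slot, rounded up to 16-byte stack alignment. ART computes this with POPCOUNT over the spill masks; this is just the arithmetic.

// Standalone arithmetic check, not the ART helper itself.
#include <cassert>

int main() {
  unsigned core_spills = 1 /* RA */ + 8 /* S2-S7, GP, FP */ + 3 /* A1-A3 */;
  unsigned fp_spills = 4;  // F12-F15
  unsigned bytes = (core_spills + fp_spills) * 4 + 4 /* ArtMethod* */;  // 68
  unsigned frame = (bytes + 15) & ~15u;  // round up to 16-byte alignment
  assert(frame == 80);                   // FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
}
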
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index f4c6473..56f7b35 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -45,10 +45,8 @@
 
 extern "C" void art_quick_invoke_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*,
                                       const char*);
-#if defined(__LP64__) || defined(__arm__) || defined(__i386__)
 extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*,
                                              const char*);
-#endif
 
 ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
                                           jobject jlr_method) {
@@ -417,15 +415,11 @@
             << "Don't call compiled code when -Xint " << PrettyMethod(this);
       }
 
-#if defined(__LP64__) || defined(__arm__) || defined(__i386__)
       if (!IsStatic()) {
         (*art_quick_invoke_stub)(this, args, args_size, self, result, shorty);
       } else {
         (*art_quick_invoke_static_stub)(this, args, args_size, self, result, shorty);
       }
-#else
-      (*art_quick_invoke_stub)(this, args, args_size, self, result, shorty);
-#endif
       if (UNLIKELY(self->GetException() == Thread::GetDeoptimizationException())) {
         // Unusual case where we were running generated code and an
         // exception was thrown to force the activations to be removed from the
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 3155b51..7965cd7 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -87,9 +87,11 @@
           CHECK(stack_mask.LoadBit(location.GetValue() / kFrameSlotSize));
           break;
         case DexRegisterLocation::Kind::kInRegister:
+        case DexRegisterLocation::Kind::kInRegisterHigh:
           CHECK_NE(register_mask & (1 << location.GetValue()), 0u);
           break;
         case DexRegisterLocation::Kind::kInFpuRegister:
+        case DexRegisterLocation::Kind::kInFpuRegisterHigh:
           // In Fpu register, should not be a reference.
           CHECK(false);
           break;
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index da4b82c..aa35ec1 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -65,6 +65,7 @@
   static constexpr bool kAlignPairRegister = !kArm32QuickCodeUseSoftFloat;
   static constexpr bool kQuickSoftFloatAbi = kArm32QuickCodeUseSoftFloat;
   static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat;
+  static constexpr bool kQuickSkipOddFpRegisters = false;
   static constexpr size_t kNumQuickGprArgs = 3;
   static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16;
   static constexpr bool kGprFprLockstep = false;
@@ -102,6 +103,7 @@
   static constexpr bool kAlignPairRegister = false;
   static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
   static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
+  static constexpr bool kQuickSkipOddFpRegisters = false;
   static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
   static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
   static constexpr bool kGprFprLockstep = false;
@@ -128,17 +130,25 @@
   // | A3         |    arg3
   // | A2         |    arg2
   // | A1         |    arg1
+  // | F15        |
+  // | F14        |    f_arg1
+  // | F13        |
+  // | F12        |    f_arg0
+  // |            |    padding
   // | A0/Method* |  <- sp
-  static constexpr bool kSplitPairAcrossRegisterAndStack = true;
-  static constexpr bool kAlignPairRegister = false;
-  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
+  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
+  static constexpr bool kAlignPairRegister = true;
+  static constexpr bool kQuickSoftFloatAbi = false;
   static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
+  static constexpr bool kQuickSkipOddFpRegisters = true;
   static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
-  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
+  static constexpr size_t kNumQuickFprArgs = 4;  // 2 arguments passed in FPRs. Floats can be passed
+                                                 // only in even-numbered registers and each double
+                                                 // occupies two registers.
   static constexpr bool kGprFprLockstep = false;
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 16;  // Offset of first GPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
+  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
+  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 32;  // Offset of first GPR arg.
+  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 76;  // Offset of return address.
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
   }
@@ -153,14 +163,6 @@
   // | Method*    | ---
   // | RA         |
   // | ...        |    callee saves
-  // | F7         |    f_arg7
-  // | F6         |    f_arg6
-  // | F5         |    f_arg5
-  // | F4         |    f_arg4
-  // | F3         |    f_arg3
-  // | F2         |    f_arg2
-  // | F1         |    f_arg1
-  // | F0         |    f_arg0
   // | A7         |    arg7
   // | A6         |    arg6
   // | A5         |    arg5
@@ -168,6 +170,14 @@
   // | A3         |    arg3
   // | A2         |    arg2
   // | A1         |    arg1
+  // | F19        |    f_arg7
+  // | F18        |    f_arg6
+  // | F17        |    f_arg5
+  // | F16        |    f_arg4
+  // | F15        |    f_arg3
+  // | F14        |    f_arg2
+  // | F13        |    f_arg1
+  // | F12        |    f_arg0
   // |            |    padding
   // | A0/Method* |  <- sp
  // NOTE: for Mips64, when A0 is skipped, F0 is also skipped.
@@ -175,9 +185,7 @@
   static constexpr bool kAlignPairRegister = false;
   static constexpr bool kQuickSoftFloatAbi = false;
   static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
-  // These values are set to zeros because GPR and FPR register
-  // assignments for Mips64 are interleaved, which the current VisitArguments()
-  // function does not support.
+  static constexpr bool kQuickSkipOddFpRegisters = false;
   static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
   static constexpr size_t kNumQuickFprArgs = 7;  // 7 arguments passed in FPRs.
   static constexpr bool kGprFprLockstep = true;
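
The kGprFprLockstep flag deserves a one-line illustration (assumed names; the real visitor uses IncGprIndex()/IncFprIndex()): on MIPS64 the integer and FP argument registers advance together, so an argument of either kind consumes one slot in both files, and skipping the GPR that holds the Method* skips the matching FPR as well, per the NOTE above.

    #include <cstddef>

    // Sketch only: with kGprFprLockstep == true, visiting any argument
    // advances both indices, keeping A-registers and F-registers in step.
    void VisitArg(bool is_fp, std::size_t* gpr_index, std::size_t* fpr_index) {
      (void)is_fp;   // Only selects where the value is read from.
      ++*gpr_index;
      ++*fpr_index;
    }
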
@@ -211,6 +219,7 @@
   static constexpr bool kAlignPairRegister = false;
   static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
   static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
+  static constexpr bool kQuickSkipOddFpRegisters = false;
   static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
   static constexpr size_t kNumQuickFprArgs = 4;  // 4 arguments passed in FPRs.
   static constexpr bool kGprFprLockstep = false;
@@ -252,6 +261,7 @@
   static constexpr bool kAlignPairRegister = false;
   static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
   static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
+  static constexpr bool kQuickSkipOddFpRegisters = false;
   static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
   static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
   static constexpr bool kGprFprLockstep = false;
@@ -475,6 +485,8 @@
                 if (fpr_index_ % 2 == 0) {
                   fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                 }
+              } else if (kQuickSkipOddFpRegisters) {
+                IncFprIndex();
               }
             }
           }
@@ -483,8 +495,9 @@
         case Primitive::kPrimLong:
           if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
             if (cur_type_ == Primitive::kPrimLong && kAlignPairRegister && gpr_index_ == 0) {
-              // Currently, this is only for ARM, where the first available parameter register
-              // is R1. So we skip it, and use R2 instead.
+              // Currently, this is only for ARM and MIPS, where the first available parameter
+              // register is R1 (on ARM) or A1 (on MIPS). So we skip it, and use R2 (on ARM) or
+              // A2 (on MIPS) instead.
               IncGprIndex();
             }
             is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
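
A compact sketch of the pair-alignment rule the comment describes (assumed names): with kAlignPairRegister set, a 64-bit value that would start at the first argument GPR gets bumped so the pair is register-aligned, R2/R3 on ARM and A2/A3 on MIPS.

    #include <cstddef>

    constexpr bool kAlignPairRegister = true;  // ARM / MIPS o32 value above.

    // Sketch only: index 0 is the first free argument GPR (R1 or A1, since
    // R0/A0 holds the Method*). Pairing it with the next register would be
    // misaligned, so a leading long skips ahead one register.
    std::size_t AlignGprForPair(std::size_t gpr_index) {
      return (kAlignPairRegister && gpr_index == 0) ? gpr_index + 1 : gpr_index;
    }
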
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index b9cc219..7e464e9 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -443,7 +443,7 @@
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
   if (klass->IsProxyClass()) {
-    // Return an empty array instead of a null pointer
+    // Return an empty array instead of a null pointer.
     mirror::Class* annotation_array_class =
         soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
     mirror::ObjectArray<mirror::Object>* empty_array =
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index cf3346d..b4b77e7 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -51,7 +51,7 @@
   mirror::ObjectArray<mirror::Object>* result_array =
       method->GetDexFile()->GetExceptionTypesForMethod(method);
   if (result_array == nullptr) {
-    // Return an empty array instead of a null pointer
+    // Return an empty array instead of a null pointer.
     mirror::Class* class_class = mirror::Class::GetJavaLangClass();
     mirror::Class* class_array_class =
         Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index cc60fe0..aac800a 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -419,6 +419,9 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField();
+  if (field->GetDeclaringClass()->IsProxyClass()) {
+    return nullptr;
+  }
   Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
   return soa.AddLocalReference<jobject>(field->GetDexFile()->GetAnnotationForField(field, klass));
 }
@@ -426,12 +429,23 @@
 static jobjectArray Field_getDeclaredAnnotations(JNIEnv* env, jobject javaField) {
   ScopedFastNativeObjectAccess soa(env);
   ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField();
+  if (field->GetDeclaringClass()->IsProxyClass()) {
+    // Return an empty array instead of a null pointer.
+    mirror::Class* annotation_array_class =
+        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
+    mirror::ObjectArray<mirror::Object>* empty_array =
+        mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class, 0);
+    return soa.AddLocalReference<jobjectArray>(empty_array);
+  }
   return soa.AddLocalReference<jobjectArray>(field->GetDexFile()->GetAnnotationsForField(field));
 }
 
 static jobjectArray Field_getSignatureAnnotation(JNIEnv* env, jobject javaField) {
   ScopedFastNativeObjectAccess soa(env);
   ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField();
+  if (field->GetDeclaringClass()->IsProxyClass()) {
+    return nullptr;
+  }
   return soa.AddLocalReference<jobjectArray>(
       field->GetDexFile()->GetSignatureAnnotationForField(field));
 }
@@ -441,6 +455,9 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField();
+  if (field->GetDeclaringClass()->IsProxyClass()) {
+    return false;
+  }
   Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
   return field->GetDexFile()->IsFieldAnnotationPresent(field, klass);
 }
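
All four guards added to this file repeat one check; a hypothetical helper, not part of the patch, states the shared rationale (ArtField and SHARED_REQUIRES come from the surrounding ART headers):

    // Hypothetical refactoring for illustration only.
    static bool IsFieldOfProxyClass(ArtField* field)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      // Proxy classes are generated at runtime and have no backing dex file,
      // so annotation lookups on their fields must short-circuit.
      return field->GetDeclaringClass()->IsProxyClass();
    }
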
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 2f0e7fe..1219f85 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -45,7 +45,7 @@
   ScopedFastNativeObjectAccess soa(env);
   ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
   if (method->GetDeclaringClass()->IsProxyClass()) {
-    // Return an empty array instead of a null pointer
+    // Return an empty array instead of a null pointer.
     mirror::Class* annotation_array_class =
         soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
     mirror::ObjectArray<mirror::Object>* empty_array =
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index d1a4081..9d5ce9f 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -152,6 +152,9 @@
   if (instrumentation->HasExceptionCaughtListeners()
       && self_->IsExceptionThrownByCurrentMethod(exception)) {
     instrumentation->ExceptionCaughtEvent(self_, exception_ref.Get());
+    // Instrumentation may have been updated.
+    method_tracing_active_ = is_deoptimization_ ||
+        Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
   }
 }
 
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index ce9085d..e934834 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -85,7 +85,7 @@
   Context* const context_;
   const bool is_deoptimization_;
   // Is method tracing active?
-  const bool method_tracing_active_;
+  bool method_tracing_active_;
   // Quick frame with found handler or last frame if no handler found.
   ArtMethod** handler_quick_frame_;
   // PC to branch to for the handler.
diff --git a/runtime/stack.cc b/runtime/stack.cc
index b07b244..a765a3f 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -299,7 +299,9 @@
       return true;
     }
     case DexRegisterLocation::Kind::kInRegister:
-    case DexRegisterLocation::Kind::kInFpuRegister: {
+    case DexRegisterLocation::Kind::kInRegisterHigh:
+    case DexRegisterLocation::Kind::kInFpuRegister:
+    case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
       uint32_t reg =
           dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info, encoding);
       return GetRegisterIfAccessible(reg, kind, val);
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 0d3816b..07b79b5 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -59,26 +59,33 @@
   /*
    * The location kind used to populate the Dex register information in a
    * StackMapStream can either be:
-   * - kNone: the register has no location yet, meaning it has not been set;
+   * - kStack: vreg stored on the stack, value holds the stack offset;
+   * - kInRegister: vreg stored in low 32 bits of a core physical register,
+   *                value holds the register number;
+   * - kInRegisterHigh: vreg stored in high 32 bits of a core physical register,
+   *                    value holds the register number;
+   * - kInFpuRegister: vreg stored in low 32 bits of an FPU register,
+   *                   value holds the register number;
+   * - kInFpuRegisterHigh: vreg stored in high 32 bits of an FPU register,
+   *                       value holds the register number;
    * - kConstant: value holds the constant;
-   * - kStack: value holds the stack offset;
-   * - kRegister: value holds the physical register number;
-   * - kFpuRegister: value holds the physical register number.
    *
    * In addition, DexRegisterMap also uses these values:
    * - kInStackLargeOffset: value holds a "large" stack offset (greater than
    *   or equal to 128 bytes);
    * - kConstantLargeValue: value holds a "large" constant (lower than 0, or
-   *   or greater than or equal to 32).
+   *   greater than or equal to 32);
+   * - kNone: the register has no location, meaning it has not been set.
    */
   enum class Kind : uint8_t {
     // Short location kinds, for entries fitting on one byte (3 bits
     // for the kind, 5 bits for the value) in a DexRegisterMap.
-    kNone = 0,                // 0b000
-    kInStack = 1,             // 0b001
-    kInRegister = 2,          // 0b010
+    kInStack = 0,             // 0b000
+    kInRegister = 1,          // 0b001
+    kInRegisterHigh = 2,      // 0b010
     kInFpuRegister = 3,       // 0b011
-    kConstant = 4,            // 0b100
+    kInFpuRegisterHigh = 4,   // 0b100
+    kConstant = 5,            // 0b101
 
     // Large location kinds, requiring a 5-byte encoding (1 byte for the
     // kind, 4 bytes for the value).
@@ -87,11 +94,14 @@
     // divided by the stack frame slot size (4 bytes) cannot fit on a
     // 5-bit unsigned integer (i.e., this offset value is greater than
     // or equal to 2^5 * 4 = 128 bytes).
-    kInStackLargeOffset = 5,  // 0b101
+    kInStackLargeOffset = 6,  // 0b110
 
     // Large constant that cannot fit on a 5-bit signed integer (i.e.,
     // lower than 0, or greater than or equal to 2^5 = 32).
-    kConstantLargeValue = 6,  // 0b110
+    kConstantLargeValue = 7,  // 0b111
+
+    // Entries with no location are not stored and do not need their own marker.
+    kNone = static_cast<uint8_t>(-1),
 
     kLastLocationKind = kConstantLargeValue
   };
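
For orientation, a sketch of the one-byte short-location format the renumbering above must respect: 3 bits of kind and 5 bits of value. The bit order here is an assumption for illustration; the authoritative packing lives in DexRegisterLocationCatalog below.

    #include <cstdint>

    // Sketch only: with 3 kind bits, kConstantLargeValue == 7 (0b111) is the
    // largest encodable kind, which is why kNone, never stored, can take the
    // out-of-band value 0xff.
    uint8_t PackShortLocation(uint8_t kind, uint8_t value) {
      return static_cast<uint8_t>((kind << 5) | (value & 0x1fu));
    }
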
@@ -108,25 +118,29 @@
         return "in stack";
       case Kind::kInRegister:
         return "in register";
+      case Kind::kInRegisterHigh:
+        return "in register high";
       case Kind::kInFpuRegister:
         return "in fpu register";
+      case Kind::kInFpuRegisterHigh:
+        return "in fpu register high";
       case Kind::kConstant:
         return "as constant";
       case Kind::kInStackLargeOffset:
         return "in stack (large offset)";
       case Kind::kConstantLargeValue:
         return "as constant (large value)";
-      default:
-        UNREACHABLE();
     }
+    UNREACHABLE();
   }
 
   static bool IsShortLocationKind(Kind kind) {
     switch (kind) {
-      case Kind::kNone:
       case Kind::kInStack:
       case Kind::kInRegister:
+      case Kind::kInRegisterHigh:
       case Kind::kInFpuRegister:
+      case Kind::kInFpuRegisterHigh:
       case Kind::kConstant:
         return true;
 
@@ -134,9 +148,10 @@
       case Kind::kConstantLargeValue:
         return false;
 
-      default:
-        UNREACHABLE();
+      case Kind::kNone:
+        LOG(FATAL) << "Unexpected location kind " << PrettyDescriptor(kind);
     }
+    UNREACHABLE();
   }
 
   // Convert `kind` to a "surface" kind, i.e. one that doesn't include
@@ -144,10 +159,11 @@
   // TODO: Introduce another enum type for the surface kind?
   static Kind ConvertToSurfaceKind(Kind kind) {
     switch (kind) {
-      case Kind::kNone:
       case Kind::kInStack:
       case Kind::kInRegister:
+      case Kind::kInRegisterHigh:
       case Kind::kInFpuRegister:
+      case Kind::kInFpuRegisterHigh:
       case Kind::kConstant:
         return kind;
 
@@ -157,9 +173,10 @@
       case Kind::kConstantLargeValue:
         return Kind::kConstant;
 
-      default:
-        UNREACHABLE();
+      case Kind::kNone:
+        return kind;
     }
+    UNREACHABLE();
   }
 
   // Required by art::StackMapStream::LocationCatalogEntriesIndices.
@@ -305,55 +322,60 @@
 
   // Compute the compressed kind of `location`.
   static DexRegisterLocation::Kind ComputeCompressedKind(const DexRegisterLocation& location) {
-    switch (location.GetInternalKind()) {
-      case DexRegisterLocation::Kind::kNone:
-        DCHECK_EQ(location.GetValue(), 0);
-        return DexRegisterLocation::Kind::kNone;
-
-      case DexRegisterLocation::Kind::kInRegister:
-        DCHECK_GE(location.GetValue(), 0);
-        DCHECK_LT(location.GetValue(), 1 << kValueBits);
-        return DexRegisterLocation::Kind::kInRegister;
-
-      case DexRegisterLocation::Kind::kInFpuRegister:
-        DCHECK_GE(location.GetValue(), 0);
-        DCHECK_LT(location.GetValue(), 1 << kValueBits);
-        return DexRegisterLocation::Kind::kInFpuRegister;
-
+    DexRegisterLocation::Kind kind = location.GetInternalKind();
+    switch (kind) {
       case DexRegisterLocation::Kind::kInStack:
         return IsShortStackOffsetValue(location.GetValue())
             ? DexRegisterLocation::Kind::kInStack
             : DexRegisterLocation::Kind::kInStackLargeOffset;
 
+      case DexRegisterLocation::Kind::kInRegister:
+      case DexRegisterLocation::Kind::kInRegisterHigh:
+        DCHECK_GE(location.GetValue(), 0);
+        DCHECK_LT(location.GetValue(), 1 << kValueBits);
+        return kind;
+
+      case DexRegisterLocation::Kind::kInFpuRegister:
+      case DexRegisterLocation::Kind::kInFpuRegisterHigh:
+        DCHECK_GE(location.GetValue(), 0);
+        DCHECK_LT(location.GetValue(), 1 << kValueBits);
+        return kind;
+
       case DexRegisterLocation::Kind::kConstant:
         return IsShortConstantValue(location.GetValue())
             ? DexRegisterLocation::Kind::kConstant
             : DexRegisterLocation::Kind::kConstantLargeValue;
 
-      default:
-        LOG(FATAL) << "Unexpected location kind"
-                   << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
-        UNREACHABLE();
+      case DexRegisterLocation::Kind::kConstantLargeValue:
+      case DexRegisterLocation::Kind::kInStackLargeOffset:
+      case DexRegisterLocation::Kind::kNone:
+        LOG(FATAL) << "Unexpected location kind " << DexRegisterLocation::PrettyDescriptor(kind);
     }
+    UNREACHABLE();
   }
 
   // Can `location` be turned into a short location?
   static bool CanBeEncodedAsShortLocation(const DexRegisterLocation& location) {
-    switch (location.GetInternalKind()) {
-      case DexRegisterLocation::Kind::kNone:
-      case DexRegisterLocation::Kind::kInRegister:
-      case DexRegisterLocation::Kind::kInFpuRegister:
-        return true;
-
+    DexRegisterLocation::Kind kind = location.GetInternalKind();
+    switch (kind) {
       case DexRegisterLocation::Kind::kInStack:
         return IsShortStackOffsetValue(location.GetValue());
 
+      case DexRegisterLocation::Kind::kInRegister:
+      case DexRegisterLocation::Kind::kInRegisterHigh:
+      case DexRegisterLocation::Kind::kInFpuRegister:
+      case DexRegisterLocation::Kind::kInFpuRegisterHigh:
+        return true;
+
       case DexRegisterLocation::Kind::kConstant:
         return IsShortConstantValue(location.GetValue());
 
-      default:
-        UNREACHABLE();
+      case DexRegisterLocation::Kind::kConstantLargeValue:
+      case DexRegisterLocation::Kind::kInStackLargeOffset:
+      case DexRegisterLocation::Kind::kNone:
+        LOG(FATAL) << "Unexpected location kind " << DexRegisterLocation::PrettyDescriptor(kind);
     }
+    UNREACHABLE();
   }
 
   static size_t EntrySize(const DexRegisterLocation& location) {
@@ -501,8 +523,10 @@
                              const StackMapEncoding& enc) const {
     DexRegisterLocation location =
         GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
-    DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister
-           || location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister)
+    DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister ||
+           location.GetInternalKind() == DexRegisterLocation::Kind::kInRegisterHigh ||
+           location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister ||
+           location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegisterHigh)
         << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
     return location.GetValue();
   }
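
What a consumer does with the new High kinds, sketched with an assumed helper: the register number returned by GetMachineRegister is unchanged; the kind selects which 32-bit half of the 64-bit machine register holds the vreg.

    #include <cstdint>

    // Sketch only: used after GetMachineRegister() has produced the register
    // number and the caller has read its 64-bit contents.
    uint32_t ExtractVRegHalf(uint64_t reg_value, bool is_high_kind) {
      return is_high_kind ? static_cast<uint32_t>(reg_value >> 32)
                          : static_cast<uint32_t>(reg_value);
    }
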
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index a506071..efefa8b 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1387,13 +1387,15 @@
       if (declaring_class.IsJavaLangObject()) {
         // "this" is implicitly initialized.
         reg_line->SetThisInitialized();
-        reg_line->SetRegisterType(this, arg_start + cur_arg, declaring_class);
+        reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, declaring_class);
       } else {
-        reg_line->SetRegisterType(this, arg_start + cur_arg,
-                                  reg_types_.UninitializedThisArgument(declaring_class));
+        reg_line->SetRegisterType<LockOp::kClear>(
+            this,
+            arg_start + cur_arg,
+            reg_types_.UninitializedThisArgument(declaring_class));
       }
     } else {
-      reg_line->SetRegisterType(this, arg_start + cur_arg, declaring_class);
+      reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, declaring_class);
     }
     cur_arg++;
   }
@@ -1425,26 +1427,26 @@
             DCHECK(HasFailures());
             return false;
           }
-          reg_line->SetRegisterType(this, arg_start + cur_arg, reg_type);
+          reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_type);
         }
         break;
       case 'Z':
-        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Boolean());
+        reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Boolean());
         break;
       case 'C':
-        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Char());
+        reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Char());
         break;
       case 'B':
-        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Byte());
+        reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Byte());
         break;
       case 'I':
-        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Integer());
+        reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Integer());
         break;
       case 'S':
-        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Short());
+        reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Short());
         break;
       case 'F':
-        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Float());
+        reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Float());
         break;
       case 'J':
       case 'D': {
@@ -1787,7 +1789,7 @@
        * that as part of extracting the exception type from the catch block list.
        */
       const RegType& res_type = GetCaughtExceptionType();
-      work_line_->SetRegisterType(this, inst->VRegA_11x(), res_type);
+      work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_11x(), res_type);
       break;
     }
     case Instruction::RETURN_VOID:
@@ -1887,26 +1889,26 @@
       /* could be boolean, int, float, or a null reference */
     case Instruction::CONST_4: {
       int32_t val = static_cast<int32_t>(inst->VRegB_11n() << 28) >> 28;
-      work_line_->SetRegisterType(this, inst->VRegA_11n(),
-                                  DetermineCat1Constant(val, need_precise_constants_));
+      work_line_->SetRegisterType<LockOp::kClear>(
+          this, inst->VRegA_11n(), DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
     case Instruction::CONST_16: {
       int16_t val = static_cast<int16_t>(inst->VRegB_21s());
-      work_line_->SetRegisterType(this, inst->VRegA_21s(),
-                                  DetermineCat1Constant(val, need_precise_constants_));
+      work_line_->SetRegisterType<LockOp::kClear>(
+          this, inst->VRegA_21s(), DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
     case Instruction::CONST: {
       int32_t val = inst->VRegB_31i();
-      work_line_->SetRegisterType(this, inst->VRegA_31i(),
-                                  DetermineCat1Constant(val, need_precise_constants_));
+      work_line_->SetRegisterType<LockOp::kClear>(
+          this, inst->VRegA_31i(), DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
     case Instruction::CONST_HIGH16: {
       int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
-      work_line_->SetRegisterType(this, inst->VRegA_21h(),
-                                  DetermineCat1Constant(val, need_precise_constants_));
+      work_line_->SetRegisterType<LockOp::kClear>(
+          this, inst->VRegA_21h(), DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
       /* could be long or double; resolved upon use */
@@ -1939,19 +1941,21 @@
       break;
     }
     case Instruction::CONST_STRING:
-      work_line_->SetRegisterType(this, inst->VRegA_21c(), reg_types_.JavaLangString());
+      work_line_->SetRegisterType<LockOp::kClear>(
+          this, inst->VRegA_21c(), reg_types_.JavaLangString());
       break;
     case Instruction::CONST_STRING_JUMBO:
-      work_line_->SetRegisterType(this, inst->VRegA_31c(), reg_types_.JavaLangString());
+      work_line_->SetRegisterType<LockOp::kClear>(
+          this, inst->VRegA_31c(), reg_types_.JavaLangString());
       break;
     case Instruction::CONST_CLASS: {
       // Get type from instruction; if unresolved then we need an access check
       // TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
       const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
       // Register holds class, i.e. its type is class; on error it will hold Conflict.
-      work_line_->SetRegisterType(this, inst->VRegA_21c(),
-                                  res_type.IsConflict() ? res_type
-                                                        : reg_types_.JavaLangClass());
+      work_line_->SetRegisterType<LockOp::kClear>(
+          this, inst->VRegA_21c(), res_type.IsConflict() ? res_type
+                                                         : reg_types_.JavaLangClass());
       break;
     }
     case Instruction::MONITOR_ENTER:
@@ -2032,7 +2036,9 @@
 
         DCHECK_NE(failures_.size(), 0U);
         if (!is_checkcast) {
-          work_line_->SetRegisterType(this, inst->VRegA_22c(), reg_types_.Boolean());
+          work_line_->SetRegisterType<LockOp::kClear>(this,
+                                                      inst->VRegA_22c(),
+                                                      reg_types_.Boolean());
         }
         break;  // bad class
       }
@@ -2053,9 +2059,11 @@
         }
       } else {
         if (is_checkcast) {
-          work_line_->SetRegisterType(this, inst->VRegA_21c(), res_type);
+          work_line_->SetRegisterType<LockOp::kKeep>(this, inst->VRegA_21c(), res_type);
         } else {
-          work_line_->SetRegisterType(this, inst->VRegA_22c(), reg_types_.Boolean());
+          work_line_->SetRegisterType<LockOp::kClear>(this,
+                                                      inst->VRegA_22c(),
+                                                      reg_types_.Boolean());
         }
       }
       break;
@@ -2066,7 +2074,9 @@
         if (!res_type.IsArrayTypes() && !res_type.IsZero()) {  // i.e. not an array or null
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
         } else {
-          work_line_->SetRegisterType(this, inst->VRegA_12x(), reg_types_.Integer());
+          work_line_->SetRegisterType<LockOp::kClear>(this,
+                                                      inst->VRegA_12x(),
+                                                      reg_types_.Integer());
         }
       } else {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
@@ -2091,7 +2101,7 @@
       // initialized must be marked invalid.
       work_line_->MarkUninitRefsAsInvalid(this, uninit_type);
       // add the new uninitialized reference to the register state
-      work_line_->SetRegisterType(this, inst->VRegA_21c(), uninit_type);
+      work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_21c(), uninit_type);
       break;
     }
     case Instruction::NEW_ARRAY:
@@ -2113,7 +2123,7 @@
       if (!work_line_->VerifyRegisterType(this, inst->VRegC_23x(), reg_types_.Float())) {
         break;
       }
-      work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Integer());
+      work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_23x(), reg_types_.Integer());
       break;
     case Instruction::CMPL_DOUBLE:
     case Instruction::CMPG_DOUBLE:
@@ -2125,7 +2135,7 @@
                                               reg_types_.DoubleHi())) {
         break;
       }
-      work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Integer());
+      work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_23x(), reg_types_.Integer());
       break;
     case Instruction::CMP_LONG:
       if (!work_line_->VerifyRegisterTypeWide(this, inst->VRegB_23x(), reg_types_.LongLo(),
@@ -2136,7 +2146,7 @@
                                               reg_types_.LongHi())) {
         break;
       }
-      work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Integer());
+      work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_23x(), reg_types_.Integer());
       break;
     case Instruction::THROW: {
       const RegType& res_type = work_line_->GetRegisterType(this, inst->VRegA_11x());
@@ -2291,7 +2301,9 @@
             branch_line.reset(update_line);
           }
           update_line->CopyFromLine(work_line_.get());
-          update_line->SetRegisterType(this, instance_of_inst->VRegB_22c(), cast_type);
+          update_line->SetRegisterType<LockOp::kKeep>(this,
+                                                      instance_of_inst->VRegB_22c(),
+                                                      cast_type);
           if (!insn_flags_[instance_of_idx].IsBranchTarget() && 0 != instance_of_idx) {
             // See if instance-of was preceded by a move-object operation, common due to the small
             // register encoding space of instance-of, and propagate type information to the source
@@ -2309,17 +2321,23 @@
             switch (move_inst->Opcode()) {
               case Instruction::MOVE_OBJECT:
                 if (move_inst->VRegA_12x() == instance_of_inst->VRegB_22c()) {
-                  update_line->SetRegisterType(this, move_inst->VRegB_12x(), cast_type);
+                  update_line->SetRegisterType<LockOp::kKeep>(this,
+                                                              move_inst->VRegB_12x(),
+                                                              cast_type);
                 }
                 break;
               case Instruction::MOVE_OBJECT_FROM16:
                 if (move_inst->VRegA_22x() == instance_of_inst->VRegB_22c()) {
-                  update_line->SetRegisterType(this, move_inst->VRegB_22x(), cast_type);
+                  update_line->SetRegisterType<LockOp::kKeep>(this,
+                                                              move_inst->VRegB_22x(),
+                                                              cast_type);
                 }
                 break;
               case Instruction::MOVE_OBJECT_16:
                 if (move_inst->VRegA_32x() == instance_of_inst->VRegB_22c()) {
-                  update_line->SetRegisterType(this, move_inst->VRegB_32x(), cast_type);
+                  update_line->SetRegisterType<LockOp::kKeep>(this,
+                                                              move_inst->VRegB_32x(),
+                                                              cast_type);
                 }
                 break;
               default:
@@ -3010,7 +3028,7 @@
       // is good enough for some other verification to occur without hard-failing.
       const uint32_t vreg_target_object = inst->VRegA_22x();  // box-lambda vA, vB
       const RegType& reg_type = reg_types_.JavaLangObject(need_precise_constants_);
-      work_line_->SetRegisterType(this, vreg_target_object, reg_type);
+      work_line_->SetRegisterType<LockOp::kClear>(this, vreg_target_object, reg_type);
       break;
     }
 
@@ -3839,7 +3857,7 @@
       work_line_->VerifyRegisterType(this, inst->VRegB_22c(), reg_types_.Integer());
       /* set register type to array class */
       const RegType& precise_type = reg_types_.FromUninitialized(res_type);
-      work_line_->SetRegisterType(this, inst->VRegA_22c(), precise_type);
+      work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_22c(), precise_type);
     } else {
       // Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
       // the list and fail. It's legal, if silly, for arg_count to be zero.
@@ -3876,7 +3894,7 @@
       // instruction type. TODO: have a proper notion of bottom here.
       if (!is_primitive || insn_type.IsCategory1Types()) {
         // Reference or category 1
-        work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Zero());
+        work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_23x(), reg_types_.Zero());
       } else {
         // Category 2
         work_line_->SetRegisterTypeWide(this, inst->VRegA_23x(),
@@ -3904,7 +3922,7 @@
         // instruction, which can't differentiate object types and ints from floats, longs from
         // doubles.
         if (!component_type.IsLowHalf()) {
-          work_line_->SetRegisterType(this, inst->VRegA_23x(), component_type);
+          work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_23x(), component_type);
         } else {
           work_line_->SetRegisterTypeWide(this, inst->VRegA_23x(), component_type,
                                           component_type.HighHalf(&reg_types_));
@@ -4208,13 +4226,13 @@
                     << "' but found type '" << *field_type
                     << "' in get-object";
         if (error != VERIFY_ERROR_BAD_CLASS_HARD) {
-          work_line_->SetRegisterType(this, vregA, reg_types_.Conflict());
+          work_line_->SetRegisterType<LockOp::kClear>(this, vregA, reg_types_.Conflict());
         }
         return;
       }
     }
     if (!field_type->IsLowHalf()) {
-      work_line_->SetRegisterType(this, vregA, *field_type);
+      work_line_->SetRegisterType<LockOp::kClear>(this, vregA, *field_type);
     } else {
       work_line_->SetRegisterTypeWide(this, vregA, *field_type, field_type->HighHalf(&reg_types_));
     }
@@ -4357,12 +4375,12 @@
                                           << " to be compatible with type '" << insn_type
                                           << "' but found type '" << *field_type
                                           << "' in get-object";
-        work_line_->SetRegisterType(this, vregA, reg_types_.Conflict());
+        work_line_->SetRegisterType<LockOp::kClear>(this, vregA, reg_types_.Conflict());
         return;
       }
     }
     if (!field_type->IsLowHalf()) {
-      work_line_->SetRegisterType(this, vregA, *field_type);
+      work_line_->SetRegisterType<LockOp::kClear>(this, vregA, *field_type);
     } else {
       work_line_->SetRegisterTypeWide(this, vregA, *field_type, field_type->HighHalf(&reg_types_));
     }
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 9cd2bdf..bee5834 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -31,6 +31,7 @@
   return verifier->GetRegTypeCache()->GetFromId(line_[vsrc]);
 }
 
+template <LockOp kLockOp>
 inline bool RegisterLine::SetRegisterType(MethodVerifier* verifier, uint32_t vdst,
                                           const RegType& new_type) {
   DCHECK_LT(vdst, num_regs_);
@@ -43,8 +44,16 @@
     //       as they are not accessed, and our backends can handle this nowadays.
     line_[vdst] = new_type.GetId();
   }
-  // Clear the monitor entry bits for this register.
-  ClearAllRegToLockDepths(vdst);
+  switch (kLockOp) {
+    case LockOp::kClear:
+      // Clear the monitor entry bits for this register.
+      ClearAllRegToLockDepths(vdst);
+      break;
+    case LockOp::kKeep:
+      // Should only be doing this with reference types.
+      DCHECK(new_type.IsReferenceTypes());
+      break;
+  }
   return true;
 }
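
Because kLockOp is a non-type template parameter, the switch above is resolved at compile time and each instantiation keeps only its own branch; a standalone analogue:

    // Sketch only, mirroring the structure of SetRegisterType above.
    enum class LockOp { kClear, kKeep };

    template <LockOp kLockOp>
    int LockAction() {
      switch (kLockOp) {        // Constant-folded per instantiation; the
        case LockOp::kClear:    // untaken arm disappears from the code.
          return 0;
        case LockOp::kKeep:
          return 1;
      }
      return -1;  // Unreachable; keeps -Wreturn-type quiet.
    }
    // Usage: LockAction<LockOp::kClear>() compiles down to a plain `return 0`.
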
 
@@ -89,7 +98,7 @@
                                  TypeCategory cat) {
   DCHECK(cat == kTypeCategory1nr || cat == kTypeCategoryRef);
   const RegType& type = GetRegisterType(verifier, vsrc);
-  if (!SetRegisterType(verifier, vdst, type)) {
+  if (!SetRegisterType<LockOp::kClear>(verifier, vdst, type)) {
     return;
   }
   if (!type.IsConflict() &&                                  // Allow conflicts to be copied around.
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index f286a45..bb6df76 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -155,6 +155,9 @@
   for (const auto& monitor : monitors_) {
     result += StringPrintf("{%d},", monitor);
   }
+  for (const auto& pair : reg_to_lock_depths_) {
+    result += StringPrintf("<%d -> %x>", pair.first, pair.second);
+  }
   return result;
 }
 
@@ -175,7 +178,7 @@
         << "copyRes1 v" << vdst << "<- result0"  << " type=" << type;
   } else {
     DCHECK(verifier->GetRegTypeCache()->GetFromId(result_[1]).IsUndefined());
-    SetRegisterType(verifier, vdst, type);
+    SetRegisterType<LockOp::kClear>(verifier, vdst, type);
     result_[0] = verifier->GetRegTypeCache()->Undefined().GetId();
   }
 }
@@ -201,7 +204,7 @@
 void RegisterLine::CheckUnaryOp(MethodVerifier* verifier, const Instruction* inst,
                                 const RegType& dst_type, const RegType& src_type) {
   if (VerifyRegisterType(verifier, inst->VRegB_12x(), src_type)) {
-    SetRegisterType(verifier, inst->VRegA_12x(), dst_type);
+    SetRegisterType<LockOp::kClear>(verifier, inst->VRegA_12x(), dst_type);
   }
 }
 
@@ -225,7 +228,7 @@
                                         const RegType& dst_type,
                                         const RegType& src_type1, const RegType& src_type2) {
   if (VerifyRegisterTypeWide(verifier, inst->VRegB_12x(), src_type1, src_type2)) {
-    SetRegisterType(verifier, inst->VRegA_12x(), dst_type);
+    SetRegisterType<LockOp::kClear>(verifier, inst->VRegA_12x(), dst_type);
   }
 }
 
@@ -241,11 +244,13 @@
       DCHECK(dst_type.IsInteger());
       if (GetRegisterType(verifier, vregB).IsBooleanTypes() &&
           GetRegisterType(verifier, vregC).IsBooleanTypes()) {
-        SetRegisterType(verifier, inst->VRegA_23x(), verifier->GetRegTypeCache()->Boolean());
+        SetRegisterType<LockOp::kClear>(verifier,
+                                        inst->VRegA_23x(),
+                                        verifier->GetRegTypeCache()->Boolean());
         return;
       }
     }
-    SetRegisterType(verifier, inst->VRegA_23x(), dst_type);
+    SetRegisterType<LockOp::kClear>(verifier, inst->VRegA_23x(), dst_type);
   }
 }
 
@@ -279,11 +284,13 @@
       DCHECK(dst_type.IsInteger());
       if (GetRegisterType(verifier, vregA).IsBooleanTypes() &&
           GetRegisterType(verifier, vregB).IsBooleanTypes()) {
-        SetRegisterType(verifier, vregA, verifier->GetRegTypeCache()->Boolean());
+        SetRegisterType<LockOp::kClear>(verifier,
+                                        vregA,
+                                        verifier->GetRegTypeCache()->Boolean());
         return;
       }
     }
-    SetRegisterType(verifier, vregA, dst_type);
+    SetRegisterType<LockOp::kClear>(verifier, vregA, dst_type);
   }
 }
 
@@ -321,11 +328,13 @@
       /* check vB with the call, then check the constant manually */
       const uint32_t val = is_lit16 ? inst->VRegC_22s() : inst->VRegC_22b();
       if (GetRegisterType(verifier, vregB).IsBooleanTypes() && (val == 0 || val == 1)) {
-        SetRegisterType(verifier, vregA, verifier->GetRegTypeCache()->Boolean());
+        SetRegisterType<LockOp::kClear>(verifier,
+                                        vregA,
+                                        verifier->GetRegTypeCache()->Boolean());
         return;
       }
     }
-    SetRegisterType(verifier, vregA, dst_type);
+    SetRegisterType<LockOp::kClear>(verifier, vregA, dst_type);
   }
 }
 
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index a9c4c95..41f1e28 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -47,6 +47,12 @@
   kTypeCategoryRef = 3,         // object reference
 };
 
+// What to do with the lock levels when setting the register type.
+enum class LockOp {
+  kClear,                       // Clear the lock levels recorded.
+  kKeep                         // Leave the lock levels alone.
+};
+
 // During verification, we associate one of these with every "interesting" instruction. We track
 // the status of all registers, and (if the method has any monitor-enter instructions) maintain a
 // stack of entered monitors (identified by code unit offset).
@@ -83,6 +89,15 @@
   // Set the type of register N, verifying that the register is valid.  If "newType" is the "Lo"
   // part of a 64-bit value, register N+1 will be set to "newType+1".
   // The register index was validated during the static pass, so we don't need to check it here.
+  //
+  // LockOp::kClear should be used by default; it will clear the lock levels associated with the
+  // register. An example is setting the register type because an instruction writes to the
+  // register.
+  // LockOp::kKeep keeps the lock levels of the register and only changes the register type. This
+  // is typical when the underlying value did not change, but we have "different" type information
+  // available now. An example is sharpening types after a check-cast. Note that when given kKeep,
+  // the new_type is DCHECKed to be a reference type.
+  template <LockOp kLockOp>
   ALWAYS_INLINE bool SetRegisterType(MethodVerifier* verifier, uint32_t vdst,
                                      const RegType& new_type)
       SHARED_REQUIRES(Locks::mutator_lock_);
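
To make the contract concrete, the two call shapes at verifier call sites look like this (fragment; types and setup elided, see the method_verifier.cc hunks above):

    // Ordinary write: the register gets a new value, so any lock levels it
    // carried are cleared.
    reg_line->SetRegisterType<LockOp::kClear>(verifier, vdst, reg_types.Integer());

    // Type sharpening (e.g. after check-cast): same underlying reference, so
    // the recorded lock depths survive; new_type must be a reference type.
    reg_line->SetRegisterType<LockOp::kKeep>(verifier, vdst, sharpened_ref_type);
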
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 6a452eb..6568eac 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -42,4 +42,6 @@
 b/23201502 (double)
 b/23300986
 b/23300986 (2)
+b/23502994 (if-eqz)
+b/23502994 (check-cast)
 Done!
diff --git a/test/800-smali/smali/b_23502994.smali b/test/800-smali/smali/b_23502994.smali
new file mode 100644
index 0000000..d1d0554
--- /dev/null
+++ b/test/800-smali/smali/b_23502994.smali
@@ -0,0 +1,45 @@
+.class public LB23502994;
+
+.super Ljava/lang/Object;
+
+.method public static runIF_EQZ(Ljava/lang/Object;)V
+   .registers 3
+   monitor-enter v2        # Lock on parameter
+
+   # Sharpen, and try to unlock (in both branches). We should not lose the lock info when we make
+   # the register type more precise.
+
+   instance-of v0, v2, Ljava/lang/String;
+   if-eqz v0, :LnotString
+
+   # At this point v2 is of type Ljava/lang/String;
+   monitor-exit v2
+
+   goto :Lend
+
+:LnotString
+   monitor-exit v2         # Unlock the else branch
+
+   # Fall-through.
+
+:Lend
+   return-void
+
+.end method
+
+
+.method public static runCHECKCAST(Ljava/lang/Object;)V
+   .registers 3
+   monitor-enter v2        # Lock on parameter
+
+   # Sharpen, and try to unlock. We should not lose the lock info when we make the register type
+   # more precise.
+
+   check-cast v2, Ljava/lang/String;
+
+   # At this point v2 is of type Ljava/lang/String;
+   monitor-exit v2
+
+   return-void
+
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 183958a..ba4990a 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -129,6 +129,10 @@
                 new Object[] { new Object() }, null, null));
         testCases.add(new TestCase("b/23300986 (2)", "B23300986", "runAliasBeforeEnter",
                 new Object[] { new Object() }, null, null));
+        testCases.add(new TestCase("b/23502994 (if-eqz)", "B23502994", "runIF_EQZ",
+                new Object[] { new Object() }, null, null));
+        testCases.add(new TestCase("b/23502994 (check-cast)", "B23502994", "runCHECKCAST",
+                new Object[] { "abc" }, null, null));
     }
 
     public void runTests() {