Merge "Compute the right catch location for the debugger."
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 547e92e..f436962 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -100,6 +100,9 @@
 # Do you want failed tests to have their artifacts cleaned up?
 ART_TEST_RUN_TEST_ALWAYS_CLEAN ?= true
 
+# Do you want run-tests to be run with the --debug flag?
+ART_TEST_RUN_TEST_DEBUGGABLE ?= $(ART_TEST_FULL)
+
 # Define the command run on test failure. $(1) is the name of the test. Executed by the shell.
 define ART_TEST_FAILED
   ( [ -f $(ART_HOST_TEST_DIR)/skipped/$(1) ] || \
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 18d8c7a..130eed2 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -418,8 +418,8 @@
   * Test successes
   */
   {
-    EXPECT_SINGLE_PARSE_VALUE(true, "-Xjit", M::UseJIT);
-    EXPECT_SINGLE_PARSE_VALUE(false, "-Xnojit", M::UseJIT);
+    EXPECT_SINGLE_PARSE_VALUE(true, "-Xusejit:true", M::UseJIT);
+    EXPECT_SINGLE_PARSE_VALUE(false, "-Xusejit:false", M::UseJIT);
   }
   {
     EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(16 * KB), "-Xjitcodecachesize:16K", M::JITCodeCacheCapacity);
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 86a27c1..0906753 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -48,6 +48,12 @@
 	dex/quick/mips/int_mips.cc \
 	dex/quick/mips/target_mips.cc \
 	dex/quick/mips/utility_mips.cc \
+	dex/quick/mips64/assemble_mips64.cc \
+	dex/quick/mips64/call_mips64.cc \
+	dex/quick/mips64/fp_mips64.cc \
+	dex/quick/mips64/int_mips64.cc \
+	dex/quick/mips64/target_mips64.cc \
+	dex/quick/mips64/utility_mips64.cc \
 	dex/quick/mir_to_lir.cc \
 	dex/quick/quick_compiler.cc \
 	dex/quick/ralloc_util.cc \
@@ -83,6 +89,7 @@
 	jni/quick/arm/calling_convention_arm.cc \
 	jni/quick/arm64/calling_convention_arm64.cc \
 	jni/quick/mips/calling_convention_mips.cc \
+	jni/quick/mips64/calling_convention_mips64.cc \
 	jni/quick/x86/calling_convention_x86.cc \
 	jni/quick/x86_64/calling_convention_x86_64.cc \
 	jni/quick/calling_convention.cc \
@@ -154,6 +161,7 @@
   dex/quick/arm/arm_lir.h \
   dex/quick/arm64/arm64_lir.h \
   dex/quick/mips/mips_lir.h \
+  dex/quick/mips64/mips64_lir.h \
   dex/quick/resource_mask.h \
   dex/compiler_enums.h \
   dex/global_value_numbering.h \
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 0be9fd4..029c0ca 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -541,6 +541,7 @@
         break;
       case kArm64:
       case kMips:
+      case kMips64:
         bx_offset = tab_rec->anchor->offset;
         break;
       default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
@@ -1067,7 +1068,10 @@
     return lhs.LiteralOffset() < rhs.LiteralOffset();
   });
 
-  std::unique_ptr<std::vector<uint8_t>> cfi_info(ReturnFrameDescriptionEntry());
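+  // Only generate the CFI frame description entry when GDB information
+  // was requested; otherwise pass nullptr and skip the work entirely.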
+  std::unique_ptr<std::vector<uint8_t>> cfi_info(
+      cu_->compiler_driver->GetCompilerOptions().GetGenerateGDBInformation() ?
+          ReturnFrameDescriptionEntry() :
+          nullptr);
   ArrayRef<const uint8_t> cfi_ref;
   if (cfi_info.get() != nullptr) {
     cfi_ref = ArrayRef<const uint8_t>(*cfi_info);
@@ -1200,6 +1204,7 @@
   LIR* load_pc_rel = OpPcRelLoad(TargetPtrReg(symbolic_reg), data_target);
   AppendLIR(load_pc_rel);
   DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
+  DCHECK_NE(cu_->instruction_set, kMips64) << reinterpret_cast<void*>(data_target);
 }
 
 void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
@@ -1217,6 +1222,7 @@
   LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
   AppendLIR(load_pc_rel);
   DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
+  DCHECK_NE(cu_->instruction_set, kMips64) << reinterpret_cast<void*>(data_target);
 }
 
 void Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index afae89d..e57889a 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -482,6 +482,7 @@
         r_val = AllocTemp();
         break;
       case kMips:
+      case kMips64:
         r_val = AllocTemp();
         break;
       default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
@@ -1695,7 +1696,8 @@
     StoreValue(rl_dest, rl_result);
   } else {
     bool done = false;      // Set to true if we happen to find a way to use a real instruction.
-    if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
+    if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64 ||
+        cu_->instruction_set == kArm64) {
       rl_src1 = LoadValue(rl_src1, kCoreReg);
       rl_src2 = LoadValue(rl_src2, kCoreReg);
       if (check_zero && (flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
@@ -1990,7 +1992,8 @@
       }
 
       bool done = false;
-      if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
+      if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64 ||
+          cu_->instruction_set == kArm64) {
         rl_src = LoadValue(rl_src, kCoreReg);
         rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
         done = true;
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 01f1d37..6b553fd 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -222,7 +222,8 @@
                                                       RegLocation arg0, RegLocation arg1,
                                                       bool safepoint_pc) {
   RegStorage r_tgt = CallHelperSetup(trampoline);
-  if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
+  if (cu_->instruction_set == kArm64 || cu_->instruction_set == kMips64 ||
+      cu_->instruction_set == kX86_64) {
     RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);
 
     RegStorage arg1_reg;
@@ -900,8 +901,8 @@
 }
 
 bool Mir2Lir::GenInlinedReferenceGetReferent(CallInfo* info) {
-  if (cu_->instruction_set == kMips) {
-    // TODO - add Mips implementation
+  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+    // TODO: add Mips and Mips64 implementations.
     return false;
   }
 
@@ -1028,8 +1029,8 @@
 
 // Generates an inlined String.is_empty or String.length.
 bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
-  if (cu_->instruction_set == kMips) {
-    // TODO - add Mips implementation
+  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+    // TODO: add Mips and Mips64 implementations.
     return false;
   }
   // dst = src.length();
@@ -1060,8 +1061,8 @@
 }
 
 bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
-  if (cu_->instruction_set == kMips) {
-    // TODO - add Mips implementation.
+  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+    // TODO: add Mips and Mips64 implementations.
     return false;
   }
   RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
@@ -1195,8 +1196,8 @@
 }
 
 bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
-  if (cu_->instruction_set == kMips) {
-    // TODO - add Mips implementation
+  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+    // TODO: add Mips and Mips64 implementations.
     return false;
   }
   RegLocation rl_dest = InlineTarget(info);
@@ -1210,8 +1211,8 @@
 }
 
 bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
-  if (cu_->instruction_set == kMips) {
-    // TODO - add Mips implementation
+  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+    // TODO: add Mips and Mips64 implementations.
     return false;
   }
   RegLocation rl_dest = InlineTargetWide(info);
@@ -1281,8 +1282,8 @@
 
 /* Fast string.compareTo(Ljava/lang/string;)I. */
 bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
-  if (cu_->instruction_set == kMips) {
-    // TODO - add Mips implementation
+  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+    // TODO: add Mips and Mips64 implementations.
     return false;
   }
   ClobberCallerSave();
@@ -1336,8 +1337,8 @@
 
 bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
                                   bool is_long, bool is_volatile) {
-  if (cu_->instruction_set == kMips) {
-    // TODO - add Mips implementation
+  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+    // TODO: add Mips and Mips64 implementations.
     return false;
   }
   // Unused - RegLocation rl_src_unsafe = info->args[0];
@@ -1381,8 +1382,8 @@
 
 bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
                                   bool is_object, bool is_volatile, bool is_ordered) {
-  if (cu_->instruction_set == kMips) {
-    // TODO - add Mips implementation
+  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+    // TODO: add Mips and Mips64 implementations.
     return false;
   }
   // Unused - RegLocation rl_src_unsafe = info->args[0];
diff --git a/compiler/dex/quick/mips64/assemble_mips64.cc b/compiler/dex/quick/mips64/assemble_mips64.cc
new file mode 100644
index 0000000..17a0ef1
--- /dev/null
+++ b/compiler/dex/quick/mips64/assemble_mips64.cc
@@ -0,0 +1,898 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips64.h"
+
+#include "base/logging.h"
+#include "dex/compiler_ir.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+#define MAX_ASSEMBLER_RETRIES 50
+
+/*
+ * opcode: Mips64OpCode enum
+ * skeleton: pre-designated bit-pattern for this opcode
+ * k0: key to applying ds/de
+ * ds: dest start bit position
+ * de: dest end bit position
+ * k1: key to applying s1s/s1e
+ * s1s: src1 start bit position
+ * s1e: src1 end bit position
+ * k2: key to applying s2s/s2e
+ * s2s: src2 start bit position
+ * s2e: src2 end bit position
+ * operands: number of operands (for sanity check purposes)
+ * name: mnemonic name
+ * fmt: for pretty-printing
+ */
+#define ENCODING_MAP(opcode, skeleton, k0, ds, de, k1, s1s, s1e, k2, s2s, s2e, \
+                     k3, k3s, k3e, flags, name, fmt, size) \
+        {skeleton, {{k0, ds, de}, {k1, s1s, s1e}, {k2, s2s, s2e}, \
+                    {k3, k3s, k3e}}, opcode, flags, name, fmt, size}
+
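+/*
+ * Worked example (illustrative): the kMips64Addu entry below has
+ * skeleton 0x00000021 (the R-type "addu" funct field) and kFmtBitBlt
+ * descriptors placing rd in bits 15..11, rs in bits 25..21 and rt in
+ * bits 20..16, so "addu v0,a0,a1" (rd=2, rs=4, rt=5) assembles to
+ * 0x00851021.
+ */
+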
+/* Instruction dump string format keys: !pf, where "!" is the start
+ * of the key, "p" is which numeric operand to use and "f" is the
+ * print format.
+ *
+ * [p]ositions:
+ *     0 -> operands[0] (dest)
+ *     1 -> operands[1] (src1)
+ *     2 -> operands[2] (src2)
+ *     3 -> operands[3] (extra)
+ *
+ * [f]ormats:
+ *     h -> 4-digit hex
+ *     d -> decimal
+ *     E -> decimal*4
+ *     F -> decimal*2
+ *     c -> branch condition (beq, bne, etc.)
+ *     t -> pc-relative target
+ *     T -> pc-region target
+ *     u -> 1st half of bl[x] target
+ *     v -> 2nd half of bl[x] target
+ *     r -> register
+ *     R -> register list
+ *     s -> single precision floating point register
+ *     S -> double precision floating point register
+ *     m -> Thumb2 modified immediate
+ *     n -> complemented Thumb2 modified immediate
+ *     M -> Thumb2 16-bit zero-extended immediate
+ *     b -> 4-digit binary
+ *     N -> append a NOP
+ *
+ *  [!] escape.  To insert "!", use "!!"
+ */
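+/*
+ * For example, the "addiu" fmt string "!0r,!1r,0x!2h(!2d)" prints
+ * operands 0 and 1 as register names and operand 2 twice, first in
+ * hex and then in decimal, e.g. "addiu a0,sp,0x10(16)".
+ */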
+/* NOTE: must be kept in sync with enum Mips64OpCode from mips64_lir.h */
+/*
+ * TUNING: We're currently punting on the branch delay slots.  All branch
+ * instructions in this map are given a size of 8, which during assembly
+ * is expanded to include a nop.  This scheme should be replaced with
+ * an assembler pass to fill those slots when possible.
+ */
+const Mips64EncodingMap Mips64Mir2Lir::EncodingMap[kMips64Last] = {
+    ENCODING_MAP(kMips6432BitData, 0x00000000,
+                 kFmtBitBlt, 31, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP,
+                 "data", "0x!0h(!0d)", 4),
+    ENCODING_MAP(kMips64Addiu, 0x24000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "addiu", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Addu, 0x00000021,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "addu", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64And, 0x00000024,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "and", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Andi, 0x30000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "andi", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64B, 0x10000000,
+                 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP,
+                 "b", "!0t!0N", 8),
+    ENCODING_MAP(kMips64Bal, 0x04110000,
+                 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR |
+                 NEEDS_FIXUP, "bal", "!0t!0N", 8),
+    ENCODING_MAP(kMips64Beq, 0x10000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
+                 NEEDS_FIXUP, "beq", "!0r,!1r,!2t!0N", 8),
+    ENCODING_MAP(kMips64Beqz, 0x10000000,  // Same as beq above with t = $zero.
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+                 NEEDS_FIXUP, "beqz", "!0r,!1t!0N", 8),
+    ENCODING_MAP(kMips64Bgez, 0x04010000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+                 NEEDS_FIXUP, "bgez", "!0r,!1t!0N", 8),
+    ENCODING_MAP(kMips64Bgtz, 0x1c000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+                 NEEDS_FIXUP, "bgtz", "!0r,!1t!0N", 8),
+    ENCODING_MAP(kMips64Blez, 0x18000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+                 NEEDS_FIXUP, "blez", "!0r,!1t!0N", 8),
+    ENCODING_MAP(kMips64Bltz, 0x04000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+                 NEEDS_FIXUP, "bltz", "!0r,!1t!0N", 8),
+    ENCODING_MAP(kMips64Bnez, 0x14000000,  // Same as bne below with t = $zero.
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+                 NEEDS_FIXUP, "bnez", "!0r,!1t!0N", 8),
+    ENCODING_MAP(kMips64Bne, 0x14000000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
+                 NEEDS_FIXUP, "bne", "!0r,!1r,!2t!0N", 8),
+    ENCODING_MAP(kMips64Break, 0x0000000d,
+                 kFmtBitBlt, 25, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP, "break", "!0d", 4),
+    ENCODING_MAP(kMips64Daddiu, 0x64000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "daddiu", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Daddu, 0x0000002d,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "daddu", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Dahi, 0x04060000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
+                 "dahi", "!0r,0x!1h(!1d)", 4),
+    ENCODING_MAP(kMips64Dati, 0x041e0000,
+                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
+                 "dati", "!0r,0x!1h(!1d)", 4),
+    ENCODING_MAP(kMips64Daui, 0x74000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "daui", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Ddiv, 0x0000009e,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "ddiv", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Div, 0x0000009a,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "div", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Dmod, 0x000000de,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "dmod", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Dmul, 0x0000009c,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "dmul", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Dmfc1, 0x44200000,
+                 kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "dmfc1", "!0r,!1s", 4),
+    ENCODING_MAP(kMips64Dmtc1, 0x44a00000,
+                 kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+                 "dmtc1", "!0r,!1s", 4),
+    ENCODING_MAP(kMips64Drotr32, 0x0000003e | (1 << 21),
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "drotr32", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Dsll, 0x00000038,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "dsll", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Dsll32, 0x0000003c,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "dsll32", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Dsrl, 0x0000003a,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "dsrl", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Dsrl32, 0x0000003e,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "dsrl32", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Dsra, 0x0000003b,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "dsra", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Dsra32, 0x0000003f,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "dsra32", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Dsllv, 0x00000014,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "dsllv", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Dsrlv, 0x00000016,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "dsrlv", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Dsrav, 0x00000017,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "dsrav", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Dsubu, 0x0000002f,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "dsubu", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Ext, 0x7c000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
+                 kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
+                 "ext", "!0r,!1r,!2d,!3D", 4),
+    ENCODING_MAP(kMips64Faddd, 0x46200000,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "add.d", "!0S,!1S,!2S", 4),
+    ENCODING_MAP(kMips64Fadds, 0x46000000,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "add.s", "!0s,!1s,!2s", 4),
+    ENCODING_MAP(kMips64Fdivd, 0x46200003,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "div.d", "!0S,!1S,!2S", 4),
+    ENCODING_MAP(kMips64Fdivs, 0x46000003,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "div.s", "!0s,!1s,!2s", 4),
+    ENCODING_MAP(kMips64Fmuld, 0x46200002,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "mul.d", "!0S,!1S,!2S", 4),
+    ENCODING_MAP(kMips64Fmuls, 0x46000002,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "mul.s", "!0s,!1s,!2s", 4),
+    ENCODING_MAP(kMips64Fsubd, 0x46200001,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "sub.d", "!0S,!1S,!2S", 4),
+    ENCODING_MAP(kMips64Fsubs, 0x46000001,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "sub.s", "!0s,!1s,!2s", 4),
+    ENCODING_MAP(kMips64Fcvtsd, 0x46200020,
+                 kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.s.d", "!0s,!1S", 4),
+    ENCODING_MAP(kMips64Fcvtsw, 0x46800020,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.s.w", "!0s,!1s", 4),
+    ENCODING_MAP(kMips64Fcvtds, 0x46000021,
+                 kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.d.s", "!0S,!1s", 4),
+    ENCODING_MAP(kMips64Fcvtdw, 0x46800021,
+                 kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.d.w", "!0S,!1s", 4),
+    ENCODING_MAP(kMips64Fcvtws, 0x46000024,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.w.s", "!0s,!1s", 4),
+    ENCODING_MAP(kMips64Fcvtwd, 0x46200024,
+                 kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "cvt.w.d", "!0s,!1S", 4),
+    ENCODING_MAP(kMips64Fmovd, 0x46200006,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mov.d", "!0S,!1S", 4),
+    ENCODING_MAP(kMips64Fmovs, 0x46000006,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mov.s", "!0s,!1s", 4),
+    ENCODING_MAP(kMips64Fnegd, 0x46200007,
+                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "neg.d", "!0S,!1S", 4),
+    ENCODING_MAP(kMips64Fnegs, 0x46000007,
+                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "neg.s", "!0s,!1s", 4),
+    ENCODING_MAP(kMips64Fldc1, 0xd4000000,
+                 kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "ldc1", "!0S,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Flwc1, 0xc4000000,
+                 kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lwc1", "!0s,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Fsdc1, 0xf4000000,
+                 kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sdc1", "!0S,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Fswc1, 0xe4000000,
+                 kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "swc1", "!0s,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Jal, 0x0c000000,
+                 kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
+                 "jal", "!0T(!0E)!0N", 8),
+    ENCODING_MAP(kMips64Jalr, 0x00000009,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF0_USE1,
+                 "jalr", "!0r,!1r!0N", 8),
+    ENCODING_MAP(kMips64Lahi, 0x3c000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+                 "lahi/lui", "!0r,0x!1h(!1d)", 4),
+    ENCODING_MAP(kMips64Lalo, 0x34000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "lalo/ori", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Lb, 0x80000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lb", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Lbu, 0x90000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lbu", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Ld, 0xdc000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "ld", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Lh, 0x84000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lh", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Lhu, 0x94000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lhu", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Lui, 0x3c000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+                 "lui", "!0r,0x!1h(!1d)", 4),
+    ENCODING_MAP(kMips64Lw, 0x8c000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lw", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Lwu, 0x9c000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+                 "lwu", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Mfc1, 0x44000000,
+                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "mfc1", "!0r,!1s", 4),
+    ENCODING_MAP(kMips64Mtc1, 0x44800000,
+                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+                 "mtc1", "!0r,!1s", 4),
+    ENCODING_MAP(kMips64Move, 0x0000002d,  // Or using zero reg.
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "move", "!0r,!1r", 4),
+    ENCODING_MAP(kMips64Mod, 0x000000da,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "mod", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Mul, 0x00000098,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "mul", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Nop, 0x00000000,
+                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, NO_OPERAND,
+                 "nop", ";", 4),
+    ENCODING_MAP(kMips64Nor, 0x00000027,  // Used for "not" too.
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "nor", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Or, 0x00000025,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "or", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Ori, 0x34000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "ori", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Sb, 0xa0000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sb", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Sd, 0xfc000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sd", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Seb, 0x7c000420,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "seb", "!0r,!1r", 4),
+    ENCODING_MAP(kMips64Seh, 0x7c000620,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "seh", "!0r,!1r", 4),
+    ENCODING_MAP(kMips64Sh, 0xa4000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sh", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Sll, 0x00000000,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "sll", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Sllv, 0x00000004,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "sllv", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Slt, 0x0000002a,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "slt", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Slti, 0x28000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "slti", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Sltu, 0x0000002b,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "sltu", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Sra, 0x00000003,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "sra", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Srav, 0x00000007,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "srav", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Srl, 0x00000002,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "srl", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64Srlv, 0x00000006,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "srlv", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Subu, 0x00000023,  // Used for "neg" too.
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "subu", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Sw, 0xac000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+                 "sw", "!0r,!1d(!2r)", 4),
+    ENCODING_MAP(kMips64Sync, 0x0000000f,
+                 kFmtBitBlt, 10, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_UNARY_OP,
+                 "sync", ";", 4),
+    ENCODING_MAP(kMips64Xor, 0x00000026,
+                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 "xor", "!0r,!1r,!2r", 4),
+    ENCODING_MAP(kMips64Xori, 0x38000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+                 "xori", "!0r,!1r,0x!2h(!2d)", 4),
+    ENCODING_MAP(kMips64CurrPC, 0x04110001,
+                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH | REG_DEF_LR,
+                 "addiu", "ra,pc,8", 4),
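+    // Note: kMips64CurrPC is encoded as "bal .+8"; it materializes the
+    // anchor address in ra while execution simply falls through (the
+    // branch target is the instruction after the delay slot).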
+    ENCODING_MAP(kMips64Delta, 0x67e00000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, 15, 0,
+                 kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | REG_USE_LR |
+                 NEEDS_FIXUP, "daddiu", "!0r,ra,0x!1h(!1d)", 4),
+    ENCODING_MAP(kMips64DeltaHi, 0x3c000000,
+                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | NEEDS_FIXUP,
+                 "lui", "!0r,0x!1h(!1d)", 4),
+    ENCODING_MAP(kMips64DeltaLo, 0x34000000,
+                 kFmtBlt5_2, 16, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0_USE0 | NEEDS_FIXUP,
+                 "ori", "!0r,!0r,0x!1h(!1d)", 4),
+    ENCODING_MAP(kMips64Undefined, 0x64000000,
+                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, NO_OPERAND,
+                 "undefined", "", 4),
+};
+
+
+/*
+ * Convert a short-form branch to long form.  Hopefully, this won't happen
+ * very often because the PIC sequence is especially unfortunate.
+ *
+ * Orig conditional branch
+ * -----------------------
+ *      beq  rs,rt,target
+ *
+ * Long conditional branch
+ * -----------------------
+ *      bne  rs,rt,hop
+ *      bal  .+8   ; rRA <- anchor
+ *      lui  rAT, ((target-anchor) >> 16)
+ * anchor:
+ *      ori  rAT, rAT, ((target-anchor) & 0xffff)
+ *      addu rAT, rAT, rRA
+ *      jalr rZERO, rAT
+ * hop:
+ *
+ * Orig unconditional branch
+ * -------------------------
+ *      b target
+ *
+ * Long unconditional branch
+ * -----------------------
+ *      bal  .+8   ; rRA <- anchor
+ *      lui  rAT, ((target-anchor) >> 16)
+ * anchor:
+ *      ori  rAT, rAT, ((target-anchor) & 0xffff)
+ *      addu rAT, rAT, rRA
+ *      jalr rZERO, rAT
+ *
+ *
+ * NOTE: An out-of-range bal isn't supported because it should
+ * never happen with the current PIC model.
+ */
+void Mips64Mir2Lir::ConvertShortToLongBranch(LIR* lir) {
+  // For conditional branches we'll need to reverse the sense.
+  bool unconditional = false;
+  int opcode = lir->opcode;
+  int dalvik_offset = lir->dalvik_offset;
+  switch (opcode) {
+    case kMips64Bal:
+      LOG(FATAL) << "long branch and link unsupported";
+      UNREACHABLE();
+    case kMips64B:
+      unconditional = true;
+      break;
+    case kMips64Beq:  opcode = kMips64Bne; break;
+    case kMips64Bne:  opcode = kMips64Beq; break;
+    case kMips64Beqz: opcode = kMips64Bnez; break;
+    case kMips64Bgez: opcode = kMips64Bltz; break;
+    case kMips64Bgtz: opcode = kMips64Blez; break;
+    case kMips64Blez: opcode = kMips64Bgtz; break;
+    case kMips64Bltz: opcode = kMips64Bgez; break;
+    case kMips64Bnez: opcode = kMips64Beqz; break;
+    default:
+      LOG(FATAL) << "Unexpected branch kind " << opcode;
+      UNREACHABLE();
+  }
+  LIR* hop_target = NULL;
+  if (!unconditional) {
+    hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
+    LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
+                             lir->operands[1], 0, 0, 0, hop_target);
+    InsertLIRBefore(lir, hop_branch);
+  }
+  LIR* curr_pc = RawLIR(dalvik_offset, kMips64CurrPC);
+  InsertLIRBefore(lir, curr_pc);
+  LIR* anchor = RawLIR(dalvik_offset, kPseudoTargetLabel);
+  LIR* delta_hi = RawLIR(dalvik_offset, kMips64DeltaHi, rAT, 0, WrapPointer(anchor), 0, 0,
+                         lir->target);
+  InsertLIRBefore(lir, delta_hi);
+  InsertLIRBefore(lir, anchor);
+  LIR* delta_lo = RawLIR(dalvik_offset, kMips64DeltaLo, rAT, 0, WrapPointer(anchor), 0, 0,
+                         lir->target);
+  InsertLIRBefore(lir, delta_lo);
+  LIR* addu = RawLIR(dalvik_offset, kMips64Addu, rAT, rAT, rRA);
+  InsertLIRBefore(lir, addu);
+  LIR* jalr = RawLIR(dalvik_offset, kMips64Jalr, rZERO, rAT);
+  InsertLIRBefore(lir, jalr);
+  if (!unconditional) {
+    InsertLIRBefore(lir, hop_target);
+  }
+  NopLIR(lir);
+}
+
+/*
+ * Assemble the LIR into binary instruction format.  Note that we may
+ * discover that pc-relative displacements may not fit the selected
+ * instruction.  In those cases we will try to substitute a new code
+ * sequence or request that the trace be shortened and retried.
+ */
+AssemblerStatus Mips64Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
+  LIR *lir;
+  AssemblerStatus res = kSuccess;  // Assume success.
+
+  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+    if (lir->opcode < 0) {
+      continue;
+    }
+
+    if (lir->flags.is_nop) {
+      continue;
+    }
+
+    if (lir->flags.fixup != kFixupNone) {
+      if (lir->opcode == kMips64Delta) {
+        /*
+         * The "Delta" pseudo-ops load the difference between
+         * two pc-relative locations into the target register
+         * found in operands[0].  The delta is determined by
+         * (label2 - label1), where label1 is a standard
+         * kPseudoTargetLabel and is stored in operands[2].
+         * If operands[3] is null, then label2 is a kPseudoTargetLabel
+         * and is found in lir->target.  If operands[3] is non-null,
+         * then it is a Switch/Data table.
+         */
+        int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+        EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
+        int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+        int delta = offset2 - offset1;
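+        // "daddiu" takes a signed 16-bit immediate, so only
+        // non-negative deltas that fit in 15 bits can be encoded
+        // directly; anything else is expanded below.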
+        if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
+          // Fits.
+          lir->operands[1] = delta;
+        } else {
+          // Doesn't fit - must expand to kMips64Delta[Hi|Lo] pair.
+          LIR *new_delta_hi = RawLIR(lir->dalvik_offset, kMips64DeltaHi, lir->operands[0], 0,
+                                     lir->operands[2], lir->operands[3], 0, lir->target);
+          InsertLIRBefore(lir, new_delta_hi);
+          LIR *new_delta_lo = RawLIR(lir->dalvik_offset, kMips64DeltaLo, lir->operands[0], 0,
+                                     lir->operands[2], lir->operands[3], 0, lir->target);
+          InsertLIRBefore(lir, new_delta_lo);
+          LIR *new_addu = RawLIR(lir->dalvik_offset, kMips64Daddu, lir->operands[0],
+                                 lir->operands[0], rRAd);
+          InsertLIRBefore(lir, new_addu);
+          NopLIR(lir);
+          res = kRetryAll;
+        }
+      } else if (lir->opcode == kMips64DeltaLo) {
+        int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+        EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
+        int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+        int delta = offset2 - offset1;
+        lir->operands[1] = delta & 0xffff;
+      } else if (lir->opcode == kMips64DeltaHi) {
+        int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+        EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
+        int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+        int delta = offset2 - offset1;
+        lir->operands[1] = (delta >> 16) & 0xffff;
+      } else if (lir->opcode == kMips64B || lir->opcode == kMips64Bal) {
+        LIR *target_lir = lir->target;
+        CodeOffset pc = lir->offset + 4;
+        CodeOffset target = target_lir->offset;
+        int delta = target - pc;
+        if (delta & 0x3) {
+          LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+        }
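+        // The branch immediate is a signed 16-bit word offset, giving
+        // a reach of roughly +/-2^15 instructions (about +/-128KB);
+        // the bounds below are the same conservative limits the
+        // 32-bit Mips backend uses.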
+        if (delta > 131068 || delta < -131069) {
+          res = kRetryAll;
+          ConvertShortToLongBranch(lir);
+        } else {
+          lir->operands[0] = delta >> 2;
+        }
+      } else if (lir->opcode >= kMips64Beqz && lir->opcode <= kMips64Bnez) {
+        LIR *target_lir = lir->target;
+        CodeOffset pc = lir->offset + 4;
+        CodeOffset target = target_lir->offset;
+        int delta = target - pc;
+        if (delta & 0x3) {
+          LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+        }
+        if (delta > 131068 || delta < -131069) {
+          res = kRetryAll;
+          ConvertShortToLongBranch(lir);
+        } else {
+          lir->operands[1] = delta >> 2;
+        }
+      } else if (lir->opcode == kMips64Beq || lir->opcode == kMips64Bne) {
+        LIR *target_lir = lir->target;
+        CodeOffset pc = lir->offset + 4;
+        CodeOffset target = target_lir->offset;
+        int delta = target - pc;
+        if (delta & 0x3) {
+          LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+        }
+        if (delta > 131068 || delta < -131069) {
+          res = kRetryAll;
+          ConvertShortToLongBranch(lir);
+        } else {
+          lir->operands[2] = delta >> 2;
+        }
+      } else if (lir->opcode == kMips64Jal) {
+        CodeOffset cur_pc = (start_addr + lir->offset + 4) & ~3;
+        CodeOffset target = lir->operands[0];
+        /* ensure PC-region branch can be used */
+        DCHECK_EQ((cur_pc & 0xF0000000), (target & 0xF0000000));
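+        // jal replaces the low 28 bits of pc+4 with a 26-bit word
+        // index, so caller and target must share the same 256MB
+        // region (identical top four address bits).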
+        if (target & 0x3) {
+          LOG(FATAL) << "Jump target not multiple of 4: " << target;
+        }
+        lir->operands[0] = target >> 2;
+      } else if (lir->opcode == kMips64Lahi) { /* ld address hi (via lui) */
+        LIR *target_lir = lir->target;
+        CodeOffset target = start_addr + target_lir->offset;
+        lir->operands[1] = target >> 16;
+      } else if (lir->opcode == kMips64Lalo) { /* ld address lo (via ori) */
+        LIR *target_lir = lir->target;
+        CodeOffset target = start_addr + target_lir->offset;
+        lir->operands[2] = lir->operands[2] + target;
+      }
+    }
+
+    /*
+     * If one of the pc-relative instructions expanded we'll have
+     * to make another pass.  Don't bother to fully assemble the
+     * instruction.
+     */
+    if (res != kSuccess) {
+      continue;
+    }
+    DCHECK(!IsPseudoLirOp(lir->opcode));
+    const Mips64EncodingMap *encoder = &EncodingMap[lir->opcode];
+    uint32_t bits = encoder->skeleton;
+    int i;
+    for (i = 0; i < 4; i++) {
+      uint32_t operand;
+      uint32_t value;
+      operand = lir->operands[i];
+      switch (encoder->field_loc[i].kind) {
+        case kFmtUnused:
+          break;
+        case kFmtBitBlt:
+          if (encoder->field_loc[i].start == 0 && encoder->field_loc[i].end == 31) {
+            value = operand;
+          } else {
+            value = (operand << encoder->field_loc[i].start) &
+                ((1 << (encoder->field_loc[i].end + 1)) - 1);
+          }
+          bits |= value;
+          break;
+        case kFmtBlt5_2:
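+          // Emit the same 5-bit register number into two fields.
+          // Used by kMips64DeltaLo, whose "ori" skeleton needs the
+          // register in both the rs and rt slots (ori reg,reg,imm).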
+          value = (operand & 0x1f);
+          bits |= (value << encoder->field_loc[i].start);
+          bits |= (value << encoder->field_loc[i].end);
+          break;
+        case kFmtDfp: {
+          // TODO: do we need to adjust now that we're using 64BitSolo?
+          DCHECK(RegStorage::IsDouble(operand)) << ", Operand = 0x" << std::hex << operand;
+          value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
+              ((1 << (encoder->field_loc[i].end + 1)) - 1);
+          bits |= value;
+          break;
+        }
+        case kFmtSfp:
+          DCHECK(RegStorage::IsSingle(operand)) << ", Operand = 0x" << std::hex << operand;
+          value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
+              ((1 << (encoder->field_loc[i].end + 1)) - 1);
+          bits |= value;
+          break;
+        default:
+          LOG(FATAL) << "Bad encoder format: " << encoder->field_loc[i].kind;
+      }
+    }
+    // We only support little-endian MIPS64.
+    code_buffer_.push_back(bits & 0xff);
+    code_buffer_.push_back((bits >> 8) & 0xff);
+    code_buffer_.push_back((bits >> 16) & 0xff);
+    code_buffer_.push_back((bits >> 24) & 0xff);
+    // TUNING: replace with proper delay slot handling.
+    if (encoder->size == 8) {
+      DCHECK(!IsPseudoLirOp(lir->opcode));
+      const Mips64EncodingMap *encoder2 = &EncodingMap[kMips64Nop];
+      uint32_t bits2 = encoder2->skeleton;
+      code_buffer_.push_back(bits2 & 0xff);
+      code_buffer_.push_back((bits2 >> 8) & 0xff);
+      code_buffer_.push_back((bits2 >> 16) & 0xff);
+      code_buffer_.push_back((bits2 >> 24) & 0xff);
+    }
+  }
+  return res;
+}
+
+size_t Mips64Mir2Lir::GetInsnSize(LIR* lir) {
+  DCHECK(!IsPseudoLirOp(lir->opcode));
+  return EncodingMap[lir->opcode].size;
+}
+
+// LIR offset assignment.
+// TODO: consolidate w/ Arm assembly mechanism.
+int Mips64Mir2Lir::AssignInsnOffsets() {
+  LIR* lir;
+  int offset = 0;
+
+  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+    lir->offset = offset;
+    if (LIKELY(lir->opcode >= 0)) {
+      if (!lir->flags.is_nop) {
+        offset += lir->flags.size;
+      }
+    } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) {
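+      // Record in operands[0] whether two bytes of padding are
+      // inserted here to keep subsequent code 4-byte aligned.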
+      if (offset & 0x2) {
+        offset += 2;
+        lir->operands[0] = 1;
+      } else {
+        lir->operands[0] = 0;
+      }
+    }
+    // Pseudo opcodes don't consume space.
+  }
+  return offset;
+}
+
+/*
+ * Walk the compilation unit and assign offsets to instructions
+ * and literals and compute the total size of the compiled unit.
+ * TODO: consolidate w/ Arm assembly mechanism.
+ */
+void Mips64Mir2Lir::AssignOffsets() {
+  int offset = AssignInsnOffsets();
+
+  // Const values have to be word aligned.
+  offset = RoundUp(offset, 4);
+
+  // Set up offsets for literals.
+  data_offset_ = offset;
+
+  offset = AssignLiteralOffset(offset);
+
+  offset = AssignSwitchTablesOffset(offset);
+
+  offset = AssignFillArrayDataOffset(offset);
+
+  total_size_ = offset;
+}
+
+/*
+ * Go over each instruction in the list and calculate the offset from the top
+ * before sending them off to the assembler. If an out-of-range branch
+ * distance is seen, rearrange the instructions a bit to correct it.
+ * TODO: consolidate w/ Arm assembly mechanism.
+ */
+void Mips64Mir2Lir::AssembleLIR() {
+  cu_->NewTimingSplit("Assemble");
+  AssignOffsets();
+  int assembler_retries = 0;
+  /*
+   * Assemble here.  Note that we generate code with optimistic assumptions
+   * and if found not to work, we'll have to redo the sequence and retry.
+   */
+
+  while (true) {
+    AssemblerStatus res = AssembleInstructions(0);
+    if (res == kSuccess) {
+      break;
+    } else {
+      assembler_retries++;
+      if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
+        CodegenDump();
+        LOG(FATAL) << "Assembler error - too many retries";
+      }
+      // Redo offsets and try again.
+      AssignOffsets();
+      code_buffer_.clear();
+    }
+  }
+
+  // Install literals.
+  InstallLiteralPools();
+
+  // Install switch tables.
+  InstallSwitchTables();
+
+  // Install fill array data.
+  InstallFillArrayData();
+
+  // Create the mapping table and native offset to reference map.
+  cu_->NewTimingSplit("PcMappingTable");
+  CreateMappingTables();
+
+  cu_->NewTimingSplit("GcMap");
+  CreateNativeGcMap();
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/mips64/backend_mips64.h b/compiler/dex/quick/mips64/backend_mips64.h
new file mode 100644
index 0000000..cc30ae0
--- /dev/null
+++ b/compiler/dex/quick/mips64/backend_mips64.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
+#define ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
+
+namespace art {
+
+struct CompilationUnit;
+class Mir2Lir;
+class MIRGraph;
+class ArenaAllocator;
+
+Mir2Lir* Mips64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                             ArenaAllocator* const arena);
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
diff --git a/compiler/dex/quick/mips64/call_mips64.cc b/compiler/dex/quick/mips64/call_mips64.cc
new file mode 100644
index 0000000..63cef7e
--- /dev/null
+++ b/compiler/dex/quick/mips64/call_mips64.cc
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the Mips64 ISA */
+
+#include "codegen_mips64.h"
+
+#include "base/logging.h"
+#include "dex/mir_graph.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/accounting/card_table.h"
+#include "mips64_lir.h"
+#include "mirror/art_method.h"
+#include "mirror/object_array-inl.h"
+
+namespace art {
+
+bool Mips64Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
+  // TODO
+  UNUSED(bb, mir, special);
+  return false;
+}
+
+/*
+ * The lack of pc-relative loads on Mips64 presents somewhat of a challenge
+ * for our PIC switch table strategy.  To materialize the current location
+ * we'll do a dummy JAL and reference our tables using rRA as the
+ * base register.  Note that rRA will be used both as the base to
+ * locate the switch table data and as the reference base for the switch
+ * target offsets stored in the table.  We'll use a special pseudo-instruction
+ * to represent the jal and trigger the construction of the
+ * switch table offsets (which will happen after final assembly and all
+ * labels are fixed).
+ *
+ * The test loop will look something like:
+ *
+ *   ori   r_end, rZERO, #table_size  ; size in bytes
+ *   jal   BaseLabel         ; stores "return address" (BaseLabel) in rRA
+ *   nop                     ; opportunistically fill
+ * BaseLabel:
+ *   addiu r_base, rRA, <table> - <BaseLabel>    ; table relative to BaseLabel
+ *   addu  r_end, r_end, r_base                   ; end of table
+ *   lw    r_val, [rSP, v_reg_off]                ; Test Value
+ * loop:
+ *   beq   r_base, r_end, done
+ *   lw    r_key, 0(r_base)
+ *   addu  r_base, 8
+ *   bne   r_val, r_key, loop
+ *   lw    r_disp, -4(r_base)
+ *   addu  rRA, r_disp
+ *   jalr  rZERO, rRA
+ * done:
+ *
+ */
+void Mips64Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
+  // Add the table to the list - we'll process it later.
+  SwitchTable* tab_rec = static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable),
+                                                   kArenaAllocData));
+  tab_rec->switch_mir = mir;
+  tab_rec->table = table;
+  tab_rec->vaddr = current_dalvik_offset_;
+  int elements = table[1];
+  switch_tables_.push_back(tab_rec);
+
+  // The table is composed of 8-byte key/disp pairs.
+  int byte_size = elements * 8;
+
+  int size_hi = byte_size >> 16;
+  int size_lo = byte_size & 0xffff;
+
+  RegStorage r_end = AllocTempWide();
+  if (size_hi) {
+    NewLIR2(kMips64Lui, r_end.GetReg(), size_hi);
+  }
+  // Must prevent code motion for the curr pc pair.
+  GenBarrier();  // Scheduling barrier.
+  NewLIR0(kMips64CurrPC);  // Really a jal to .+8.
+  // Now, fill the branch delay slot.
+  if (size_hi) {
+    NewLIR3(kMips64Ori, r_end.GetReg(), r_end.GetReg(), size_lo);
+  } else {
+    NewLIR3(kMips64Ori, r_end.GetReg(), rZERO, size_lo);
+  }
+  GenBarrier();  // Scheduling barrier.
+
+  // Construct BaseLabel and set up table base register.
+  LIR* base_label = NewLIR0(kPseudoTargetLabel);
+  // Remember base label so offsets can be computed later.
+  tab_rec->anchor = base_label;
+  RegStorage r_base = AllocTempWide();
+  NewLIR4(kMips64Delta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
+  OpRegRegReg(kOpAdd, r_end, r_end, r_base);
+
+  // Grab switch test value.
+  rl_src = LoadValue(rl_src, kCoreReg);
+
+  // Test loop.
+  RegStorage r_key = AllocTemp();
+  LIR* loop_label = NewLIR0(kPseudoTargetLabel);
+  LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
+  Load32Disp(r_base, 0, r_key);
+  OpRegImm(kOpAdd, r_base, 8);
+  OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
+  RegStorage r_disp = AllocTemp();
+  Load32Disp(r_base, -4, r_disp);
+  OpRegRegReg(kOpAdd, TargetReg(kLr, kWide), TargetReg(kLr, kWide), r_disp);
+  OpReg(kOpBx, TargetReg(kLr, kWide));
+
+  // Loop exit.
+  LIR* exit_label = NewLIR0(kPseudoTargetLabel);
+  exit_branch->target = exit_label;
+}
+
+/*
+ * Code pattern will look something like:
+ *
+ *   lw    r_val
+ *   jal   BaseLabel         ; stores "return address" (BaseLabel) in rRA
+ *   nop                     ; opportunistically fill
+ *   [subiu r_val, bias]      ; Remove bias if low_key != 0
+ *   bound check -> done
+ *   lw    r_disp, [rRA, r_val]
+ *   addu  rRA, r_disp
+ *   jalr  rZERO, rRA
+ * done:
+ */
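+/*
+ * Bias example (illustrative): for a packed switch whose lowest case value is
+ * low_key = 5, the delay slot strips the bias with "subiu r_key, r_val, 5".
+ * If low_key is too large for the 16-bit immediate form (e.g. 0x12345), it is
+ * first materialized into r_key and a register-register subtract is used
+ * instead (the large_bias path below).
+ */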
+void Mips64Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
+  // Add the table to the list - we'll process it later.
+  SwitchTable* tab_rec =
+      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+  tab_rec->switch_mir = mir;
+  tab_rec->table = table;
+  tab_rec->vaddr = current_dalvik_offset_;
+  int size = table[1];
+  switch_tables_.push_back(tab_rec);
+
+  // Get the switch value.
+  rl_src = LoadValue(rl_src, kCoreReg);
+
+  // Prepare the bias.  If too big, handle 1st stage here.
+  int low_key = s4FromSwitchData(&table[2]);
+  bool large_bias = false;
+  RegStorage r_key;
+  if (low_key == 0) {
+    r_key = rl_src.reg;
+  } else if ((low_key & 0xffff) != low_key) {
+    r_key = AllocTemp();
+    LoadConstant(r_key, low_key);
+    large_bias = true;
+  } else {
+    r_key = AllocTemp();
+  }
+
+  // Must prevent code motion for the curr pc pair.
+  GenBarrier();
+  NewLIR0(kMips64CurrPC);  // Really a jal to .+8.
+  // Now, fill the branch delay slot with bias strip.
+  if (low_key == 0) {
+    NewLIR0(kMips64Nop);
+  } else {
+    if (large_bias) {
+      OpRegRegReg(kOpSub, r_key, rl_src.reg, r_key);
+    } else {
+      OpRegRegImm(kOpSub, r_key, rl_src.reg, low_key);
+    }
+  }
+  GenBarrier();  // Scheduling barrier.
+
+  // Construct BaseLabel and set up table base register.
+  LIR* base_label = NewLIR0(kPseudoTargetLabel);
+  // Remember base label so offsets can be computed later.
+  tab_rec->anchor = base_label;
+
+  // Bounds check - if < 0 or >= size continue following switch.
+  LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size - 1, NULL);
+
+  // Materialize the table base pointer.
+  RegStorage r_base = AllocTempWide();
+  NewLIR4(kMips64Delta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
+
+  // Load the displacement from the switch table.
+  RegStorage r_disp = AllocTemp();
+  LoadBaseIndexed(r_base, r_key, r_disp, 2, k32);
+
+  // Add to rRA and go.
+  OpRegRegReg(kOpAdd, TargetReg(kLr, kWide), TargetReg(kLr, kWide), r_disp);
+  OpReg(kOpBx, TargetReg(kLr, kWide));
+
+  // Branch_over target here.
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  branch_over->target = target;
+}
+
+void Mips64Mir2Lir::GenMoveException(RegLocation rl_dest) {
+  int ex_offset = Thread::ExceptionOffset<8>().Int32Value();
+  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
+  RegStorage reset_reg = AllocTempRef();
+  LoadRefDisp(rs_rMIPS64_SELF, ex_offset, rl_result.reg, kNotVolatile);
+  LoadConstant(reset_reg, 0);
+  StoreRefDisp(rs_rMIPS64_SELF, ex_offset, reset_reg, kNotVolatile);
+  FreeTemp(reset_reg);
+  StoreValue(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
+  RegStorage reg_card_base = AllocTempWide();
+  RegStorage reg_card_no = AllocTempWide();
+  // NOTE: native pointer.
+  LoadWordDisp(rs_rMIPS64_SELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
+  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
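+  // Each 2^kCardShift-byte "card" of the heap maps to one byte of the card
+  // table, so reg_card_no now holds the card index.  The byte value stored
+  // below is the low byte of the card-table base itself, which the runtime
+  // arranges to equal the dirty-card marker.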
+  StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base), 0, kUnsignedByte);
+  FreeTemp(reg_card_base);
+  FreeTemp(reg_card_no);
+}
+
+void Mips64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
+  int spill_count = num_core_spills_ + num_fp_spills_;
+  /*
+   * On entry, rMIPS64_ARG0, rMIPS64_ARG1, rMIPS64_ARG2, rMIPS64_ARG3,
+   * rMIPS64_ARG4, rMIPS64_ARG5, rMIPS64_ARG6 & rMIPS64_ARG7  are live.
+   * Let the register allocation mechanism know so it doesn't try to
+   * use any of them when expanding the frame or flushing.
+   */
+  LockTemp(rs_rMIPS64_ARG0);
+  LockTemp(rs_rMIPS64_ARG1);
+  LockTemp(rs_rMIPS64_ARG2);
+  LockTemp(rs_rMIPS64_ARG3);
+  LockTemp(rs_rMIPS64_ARG4);
+  LockTemp(rs_rMIPS64_ARG5);
+  LockTemp(rs_rMIPS64_ARG6);
+  LockTemp(rs_rMIPS64_ARG7);
+
+  /*
+   * We can safely skip the stack overflow check if we're
+   * a leaf *and* our frame size < fudge factor.
+   */
+  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_,
+                                                                                 kMips64);
+  NewLIR0(kPseudoMethodEntry);
+  RegStorage check_reg = AllocTempWide();
+  RegStorage new_sp = AllocTempWide();
+  if (!skip_overflow_check) {
+    // Load stack limit.
+    LoadWordDisp(rs_rMIPS64_SELF, Thread::StackEndOffset<8>().Int32Value(), check_reg);
+  }
+  // Spill core callee saves.
+  SpillCoreRegs();
+  // NOTE: promotion of FP regs currently unsupported, thus no FP spill.
+  DCHECK_EQ(num_fp_spills_, 0);
+  const int frame_sub = frame_size_ - spill_count * 8;
+  if (!skip_overflow_check) {
+    class StackOverflowSlowPath : public LIRSlowPath {
+     public:
+      StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
+          : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), sp_displace_(sp_displace) {
+      }
+      void Compile() OVERRIDE {
+        m2l_->ResetRegPool();
+        m2l_->ResetDefTracking();
+        GenerateTargetLabel(kPseudoThrowTarget);
+        // Load RA from the top of the frame.
+        m2l_->LoadWordDisp(rs_rMIPS64_SP, sp_displace_ - 8, rs_rRAd);
+        m2l_->OpRegImm(kOpAdd, rs_rMIPS64_SP, sp_displace_);
+        m2l_->ClobberCallerSave();
+        RegStorage r_tgt = m2l_->CallHelperSetup(kQuickThrowStackOverflow);  // Doesn't clobber LR.
+        m2l_->CallHelper(r_tgt, kQuickThrowStackOverflow, false /* MarkSafepointPC */,
+                         false /* UseLink */);
+      }
+
+     private:
+      const size_t sp_displace_;
+    };
+    OpRegRegImm(kOpSub, new_sp, rs_rMIPS64_SP, frame_sub);
+    LIR* branch = OpCmpBranch(kCondUlt, new_sp, check_reg, nullptr);
+    AddSlowPath(new (arena_) StackOverflowSlowPath(this, branch, spill_count * 8));
+    // TODO: avoid copy for small frame sizes.
+    OpRegCopy(rs_rMIPS64_SP, new_sp);  // Establish stack.
+  } else {
+    OpRegImm(kOpSub, rs_rMIPS64_SP, frame_sub);
+  }
+
+  FlushIns(ArgLocs, rl_method);
+
+  FreeTemp(rs_rMIPS64_ARG0);
+  FreeTemp(rs_rMIPS64_ARG1);
+  FreeTemp(rs_rMIPS64_ARG2);
+  FreeTemp(rs_rMIPS64_ARG3);
+  FreeTemp(rs_rMIPS64_ARG4);
+  FreeTemp(rs_rMIPS64_ARG5);
+  FreeTemp(rs_rMIPS64_ARG6);
+  FreeTemp(rs_rMIPS64_ARG7);
+}
+
+void Mips64Mir2Lir::GenExitSequence() {
+  /*
+   * In the exit path, rMIPS64_RET0/rMIPS64_RET1 are live - make sure they aren't
+   * allocated by the register utilities as temps.
+   */
+  LockTemp(rs_rMIPS64_RET0);
+  LockTemp(rs_rMIPS64_RET1);
+
+  NewLIR0(kPseudoMethodExit);
+  UnSpillCoreRegs();
+  OpReg(kOpBx, rs_rRAd);
+}
+
+void Mips64Mir2Lir::GenSpecialExitSequence() {
+  OpReg(kOpBx, rs_rRAd);
+}
+
+void Mips64Mir2Lir::GenSpecialEntryForSuspend() {
+  // Keep 16-byte stack alignment - push A0, i.e. ArtMethod* and RA.
+  core_spill_mask_ = (1u << rs_rRAd.GetRegNum());
+  num_core_spills_ = 1u;
+  fp_spill_mask_ = 0u;
+  num_fp_spills_ = 0u;
+  frame_size_ = 16u;
+  core_vmap_table_.clear();
+  fp_vmap_table_.clear();
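+  // Frame layout after the adjustment below (16 bytes, preserving the
+  // required 16-byte alignment):
+  //   [sp + 8]  RA
+  //   [sp + 0]  A0 (ArtMethod*)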
+  OpRegImm(kOpSub, rs_rMIPS64_SP, frame_size_);
+  StoreWordDisp(rs_rMIPS64_SP, frame_size_ - 8, rs_rRAd);
+  StoreWordDisp(rs_rMIPS64_SP, 0, rs_rA0d);
+}
+
+void Mips64Mir2Lir::GenSpecialExitForSuspend() {
+  // Pop the frame. Don't pop ArtMethod*, it's no longer needed.
+  LoadWordDisp(rs_rMIPS64_SP, frame_size_ - 8, rs_rRAd);
+  OpRegImm(kOpAdd, rs_rMIPS64_SP, frame_size_);
+}
+
+/*
+ * Bit of a hack here - in the absence of a real scheduling pass,
+ * emit the next instruction in static & direct invoke sequences.
+ */
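+/*
+ * For a typical non-direct invoke, the state machine below is driven from
+ * state 0 through 3, emitting roughly (a sketch):
+ *   0: kArg0 = current Method*
+ *   1: kArg0 = kArg0->dex_cache_resolved_methods_
+ *   2: kArg0 = kArg0[target_method_index]
+ *   3: kInvokeTgt = kArg0->entry_point_from_quick_compiled_code_
+ */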
+static int Mips64NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED, int state,
+                                const MethodReference& target_method, uint32_t,
+                                uintptr_t direct_code, uintptr_t direct_method, InvokeType type) {
+  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
+  if (direct_code != 0 && direct_method != 0) {
+    switch (state) {
+    case 0:  // Get the current Method* [sets kArg0]
+      if (direct_code != static_cast<uintptr_t>(-1)) {
+        cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+      } else {
+        cg->LoadCodeAddress(target_method, type, kInvokeTgt);
+      }
+      if (direct_method != static_cast<uintptr_t>(-1)) {
+        cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
+      } else {
+        cg->LoadMethodAddress(target_method, type, kArg0);
+      }
+      break;
+    default:
+      return -1;
+    }
+  } else {
+    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
+    switch (state) {
+    case 0:  // Get the current Method* [sets kArg0]
+      // TUNING: we can save a reg copy if Method* has been promoted.
+      cg->LoadCurrMethodDirect(arg0_ref);
+      break;
+    case 1:  // Get method->dex_cache_resolved_methods_
+      cg->LoadRefDisp(arg0_ref, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
+                      arg0_ref, kNotVolatile);
+      // Set up direct code if known.
+      if (direct_code != 0) {
+        if (direct_code != static_cast<uintptr_t>(-1)) {
+          cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+        } else {
+          CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
+          cg->LoadCodeAddress(target_method, type, kInvokeTgt);
+        }
+      }
+      break;
+    case 2:  // Grab target method*
+      CHECK_EQ(cu->dex_file, target_method.dex_file);
+      cg->LoadRefDisp(arg0_ref, mirror::ObjectArray<mirror::Object>::
+                          OffsetOfElement(target_method.dex_method_index).Int32Value(), arg0_ref,
+                      kNotVolatile);
+      break;
+    case 3:  // Grab the code from the method*
+      if (direct_code == 0) {
+        int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+            InstructionSetPointerSize(cu->instruction_set)).Int32Value();
+        // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
+        cg->LoadWordDisp(arg0_ref, offset, cg->TargetPtrReg(kInvokeTgt));
+      }
+      break;
+    default:
+      return -1;
+    }
+  }
+  return state + 1;
+}
+
+NextCallInsn Mips64Mir2Lir::GetNextSDCallInsn() {
+  return Mips64NextSDCallInsn;
+}
+
+LIR* Mips64Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info ATTRIBUTE_UNUSED) {
+  return OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/mips64/codegen_mips64.h b/compiler/dex/quick/mips64/codegen_mips64.h
new file mode 100644
index 0000000..57c30d8
--- /dev/null
+++ b/compiler/dex/quick/mips64/codegen_mips64.h
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
+#define ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
+
+#include "dex/quick/mir_to_lir.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+struct CompilationUnit;
+
+class Mips64Mir2Lir FINAL : public Mir2Lir {
+ protected:
+  class InToRegStorageMips64Mapper : public InToRegStorageMapper {
+   public:
+    explicit InToRegStorageMips64Mapper(Mir2Lir* m2l) : m2l_(m2l), cur_arg_reg_(0) {}
+    virtual RegStorage GetNextReg(ShortyArg arg);
+    virtual void Reset() OVERRIDE {
+      cur_arg_reg_ = 0;
+    }
+   protected:
+    Mir2Lir* m2l_;
+   private:
+    size_t cur_arg_reg_;
+  };
+
+  InToRegStorageMips64Mapper in_to_reg_storage_mips64_mapper_;
+  InToRegStorageMapper* GetResetedInToRegStorageMapper() OVERRIDE {
+    in_to_reg_storage_mips64_mapper_.Reset();
+    return &in_to_reg_storage_mips64_mapper_;
+  }
+
+ public:
+  Mips64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
+
+  // Required for target - codegen utilities.
+  bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
+                          RegLocation rl_dest, int lit);
+  bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+  void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1, int32_t constant)
+  OVERRIDE;
+  void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1, int64_t constant)
+  OVERRIDE;
+  LIR* CheckSuspendUsingLoad() OVERRIDE;
+  RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
+  LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+                    VolatileKind is_volatile) OVERRIDE;
+  LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
+                       OpSize size) OVERRIDE;
+  LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
+  LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+  LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
+                     VolatileKind is_volatile) OVERRIDE;
+  LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
+                        OpSize size) OVERRIDE;
+  LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
+  LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
+
+  /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
+  void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
+
+  // Required for target - register utilities.
+  RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
+  RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) OVERRIDE {
+    if (wide_kind == kWide || wide_kind == kRef) {
+      return As64BitReg(TargetReg(reg));
+    } else {
+      return Check32BitReg(TargetReg(reg));
+    }
+  }
+  RegStorage TargetPtrReg(SpecialTargetRegister reg) OVERRIDE {
+    return As64BitReg(TargetReg(reg));
+  }
+  RegLocation GetReturnAlt();
+  RegLocation GetReturnWideAlt();
+  RegLocation LocCReturn();
+  RegLocation LocCReturnRef();
+  RegLocation LocCReturnDouble();
+  RegLocation LocCReturnFloat();
+  RegLocation LocCReturnWide();
+  ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
+  void AdjustSpillMask();
+  void ClobberCallerSave();
+  void FreeCallTemps();
+  void LockCallTemps();
+  void CompilerInitializeRegAlloc();
+
+  // Required for target - miscellaneous.
+  void AssembleLIR();
+  int AssignInsnOffsets();
+  void AssignOffsets();
+  AssemblerStatus AssembleInstructions(CodeOffset start_addr);
+  void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+  void SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
+                                ResourceMask* def_mask) OVERRIDE;
+  const char* GetTargetInstFmt(int opcode);
+  const char* GetTargetInstName(int opcode);
+  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+  ResourceMask GetPCUseDefEncoding() const OVERRIDE;
+  uint64_t GetTargetInstFlags(int opcode);
+  size_t GetInsnSize(LIR* lir) OVERRIDE;
+  bool IsUnconditionalBranch(LIR* lir);
+
+  // Get the register class for load/store of a field.
+  RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
+
+  // Required for target - Dalvik-level generators.
+  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                      RegLocation lr_shift);
+  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                         RegLocation rl_src2, int flags);
+  void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+                   RegLocation rl_dest, int scale);
+  void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+                   RegLocation rl_src, int scale, bool card_mark);
+  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                         RegLocation rl_shift, int flags);
+  void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                        RegLocation rl_src2);
+  void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                       RegLocation rl_src2);
+  void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                RegLocation rl_src2);
+  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+  bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
+  bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
+  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
+  bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
+  bool GenInlinedSqrt(CallInfo* info);
+  bool GenInlinedPeek(CallInfo* info, OpSize size);
+  bool GenInlinedPoke(CallInfo* info, OpSize size);
+  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                      RegLocation rl_src2, int flags) OVERRIDE;
+  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
+  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
+  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+  void GenDivZeroCheckWide(RegStorage reg);
+  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+  void GenExitSequence();
+  void GenSpecialExitSequence() OVERRIDE;
+  void GenSpecialEntryForSuspend() OVERRIDE;
+  void GenSpecialExitForSuspend() OVERRIDE;
+  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+  void GenSelect(BasicBlock* bb, MIR* mir);
+  void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+                        int32_t true_val, int32_t false_val, RegStorage rs_dest,
+                        RegisterClass dest_reg_class) OVERRIDE;
+  bool GenMemBarrier(MemBarrierKind barrier_kind);
+  void GenMoveException(RegLocation rl_dest);
+  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+                                     int first_bit, int second_bit);
+  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+  void GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+  void GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+  bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
+
+  // Required for target - single operation generators.
+  LIR* OpUnconditionalBranch(LIR* target);
+  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
+  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
+  LIR* OpCondBranch(ConditionCode cc, LIR* target);
+  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
+  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
+  LIR* OpIT(ConditionCode cond, const char* guide);
+  void OpEndIT(LIR* it);
+  LIR* OpMem(OpKind op, RegStorage r_base, int disp);
+  LIR* OpPcRelLoad(RegStorage reg, LIR* target);
+  LIR* OpReg(OpKind op, RegStorage r_dest_src);
+  void OpRegCopy(RegStorage r_dest, RegStorage r_src);
+  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
+  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
+  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
+  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
+  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
+  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
+  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
+  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
+  LIR* OpTestSuspend(LIR* target);
+  LIR* OpVldm(RegStorage r_base, int count);
+  LIR* OpVstm(RegStorage r_base, int count);
+  void OpRegCopyWide(RegStorage dest, RegStorage src);
+
+  // TODO: collapse r_dest.
+  LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
+  // TODO: collapse r_src.
+  LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+  void SpillCoreRegs();
+  void UnSpillCoreRegs();
+  static const Mips64EncodingMap EncodingMap[kMips64Last];
+  bool InexpensiveConstantInt(int32_t value);
+  bool InexpensiveConstantFloat(int32_t value);
+  bool InexpensiveConstantLong(int64_t value);
+  bool InexpensiveConstantDouble(int64_t value);
+
+  bool WideGPRsAreAliases() const OVERRIDE {
+    return true;  // 64b architecture.
+  }
+  bool WideFPRsAreAliases() const OVERRIDE {
+    return true;  // 64b architecture.
+  }
+
+  LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+  RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+                        bool is_div, int flags) OVERRIDE;
+  RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div)
+  OVERRIDE;
+  NextCallInsn GetNextSDCallInsn() OVERRIDE;
+  LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
+  // Unimplemented intrinsics.
+  bool GenInlinedCharAt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+    return false;
+  }
+  bool GenInlinedAbsInt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+    return false;
+  }
+  bool GenInlinedAbsLong(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+    return false;
+  }
+  bool GenInlinedIndexOf(CallInfo* info ATTRIBUTE_UNUSED, bool zero_based ATTRIBUTE_UNUSED)
+  OVERRIDE {
+    return false;
+  }
+
+ private:
+  void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
+  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+  void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+  void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                     RegLocation rl_src2, bool is_div, int flags);
+  void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
+                         RegisterClass reg_class);
+
+  void ConvertShortToLongBranch(LIR* lir);
+
+  /**
+   * @param reg #RegStorage containing a Solo64 input register (e.g. @c a1 or @c d0).
+ * @return A Solo32 with the same register number as @p reg (e.g. @c a1 or @c f0).
+   * @see As64BitReg
+   */
+  RegStorage As32BitReg(RegStorage reg) {
+    DCHECK(!reg.IsPair());
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Expected 64b register";
+      } else {
+        LOG(WARNING) << "Expected 64b register";
+        return reg;
+      }
+    }
+    RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
+                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
+    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
+              ->GetReg().GetReg(),
+              ret_val.GetReg());
+    return ret_val;
+  }
+
+  RegStorage Check32BitReg(RegStorage reg) {
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Checked for 32b register";
+      } else {
+        LOG(WARNING) << "Checked for 32b register";
+        return As32BitReg(reg);
+      }
+    }
+    return reg;
+  }
+
+  /**
+   * @param reg #RegStorage containing a Solo32 input register (e.g. @c a1 or @c f0).
+ * @return A Solo64 with the same register number as @p reg (e.g. @c a1 or @c d0).
+   */
+  RegStorage As64BitReg(RegStorage reg) {
+    DCHECK(!reg.IsPair());
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Expected 32b register";
+      } else {
+        LOG(WARNING) << "Expected 32b register";
+        return reg;
+      }
+    }
+    RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
+                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
+    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
+              ->GetReg().GetReg(),
+              ret_val.GetReg());
+    return ret_val;
+  }
+
+  RegStorage Check64BitReg(RegStorage reg) {
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Checked for 64b register";
+      } else {
+        LOG(WARNING) << "Checked for 64b register";
+        return As64BitReg(reg);
+      }
+    }
+    return reg;
+  }
+
+  void GenBreakpoint(int code);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
diff --git a/compiler/dex/quick/mips64/fp_mips64.cc b/compiler/dex/quick/mips64/fp_mips64.cc
new file mode 100644
index 0000000..5c8ee9c
--- /dev/null
+++ b/compiler/dex/quick/mips64/fp_mips64.cc
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips64.h"
+
+#include "base/logging.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+void Mips64Mir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+                                    RegLocation rl_src1, RegLocation rl_src2) {
+  int op = kMips64Nop;
+  RegLocation rl_result;
+
+  /*
+   * Don't attempt to optimize register usage since these opcodes call out to
+   * the handlers.
+   */
+  switch (opcode) {
+    case Instruction::ADD_FLOAT_2ADDR:
+    case Instruction::ADD_FLOAT:
+      op = kMips64Fadds;
+      break;
+    case Instruction::SUB_FLOAT_2ADDR:
+    case Instruction::SUB_FLOAT:
+      op = kMips64Fsubs;
+      break;
+    case Instruction::DIV_FLOAT_2ADDR:
+    case Instruction::DIV_FLOAT:
+      op = kMips64Fdivs;
+      break;
+    case Instruction::MUL_FLOAT_2ADDR:
+    case Instruction::MUL_FLOAT:
+      op = kMips64Fmuls;
+      break;
+    case Instruction::REM_FLOAT_2ADDR:
+    case Instruction::REM_FLOAT:
+      FlushAllRegs();   // Send everything to home location.
+      CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
+      rl_result = GetReturn(kFPReg);
+      StoreValue(rl_dest, rl_result);
+      return;
+    case Instruction::NEG_FLOAT:
+      GenNegFloat(rl_dest, rl_src1);
+      return;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+  rl_src1 = LoadValue(rl_src1, kFPReg);
+  rl_src2 = LoadValue(rl_src2, kFPReg);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+  StoreValue(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+                                     RegLocation rl_src1, RegLocation rl_src2) {
+  int op = kMips64Nop;
+  RegLocation rl_result;
+
+  switch (opcode) {
+    case Instruction::ADD_DOUBLE_2ADDR:
+    case Instruction::ADD_DOUBLE:
+      op = kMips64Faddd;
+      break;
+    case Instruction::SUB_DOUBLE_2ADDR:
+    case Instruction::SUB_DOUBLE:
+      op = kMips64Fsubd;
+      break;
+    case Instruction::DIV_DOUBLE_2ADDR:
+    case Instruction::DIV_DOUBLE:
+      op = kMips64Fdivd;
+      break;
+    case Instruction::MUL_DOUBLE_2ADDR:
+    case Instruction::MUL_DOUBLE:
+      op = kMips64Fmuld;
+      break;
+    case Instruction::REM_DOUBLE_2ADDR:
+    case Instruction::REM_DOUBLE:
+      FlushAllRegs();   // Send everything to home location.
+      CallRuntimeHelperRegLocationRegLocation(kQuickFmod, rl_src1, rl_src2, false);
+      rl_result = GetReturnWide(kFPReg);
+      StoreValueWide(rl_dest, rl_result);
+      return;
+    case Instruction::NEG_DOUBLE:
+      GenNegDouble(rl_dest, rl_src1);
+      return;
+    default:
+      LOG(FATAL) << "Unpexpected opcode: " << opcode;
+  }
+  rl_src1 = LoadValueWide(rl_src1, kFPReg);
+  DCHECK(rl_src1.wide);
+  rl_src2 = LoadValueWide(rl_src2, kFPReg);
+  DCHECK(rl_src2.wide);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  DCHECK(rl_dest.wide);
+  DCHECK(rl_result.wide);
+  NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+                                               int32_t constant) {
+  // TODO: need mips64 implementation.
+  UNUSED(rl_dest, rl_src1, constant);
+  LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in mips64";
+}
+
+void Mips64Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+                                                int64_t constant) {
+  // TODO: need mips64 implementation.
+  UNUSED(rl_dest, rl_src1, constant);
+  LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in mips64";
+}
+
+void Mips64Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
+                                  RegLocation rl_src) {
+  int op = kMips64Nop;
+  RegLocation rl_result;
+  switch (opcode) {
+    case Instruction::INT_TO_FLOAT:
+      op = kMips64Fcvtsw;
+      break;
+    case Instruction::DOUBLE_TO_FLOAT:
+      op = kMips64Fcvtsd;
+      break;
+    case Instruction::FLOAT_TO_DOUBLE:
+      op = kMips64Fcvtds;
+      break;
+    case Instruction::INT_TO_DOUBLE:
+      op = kMips64Fcvtdw;
+      break;
+    case Instruction::FLOAT_TO_INT:
+      GenConversionCall(kQuickF2iz, rl_dest, rl_src, kCoreReg);
+      return;
+    case Instruction::DOUBLE_TO_INT:
+      GenConversionCall(kQuickD2iz, rl_dest, rl_src, kCoreReg);
+      return;
+    case Instruction::LONG_TO_DOUBLE:
+      GenConversionCall(kQuickL2d, rl_dest, rl_src, kFPReg);
+      return;
+    case Instruction::FLOAT_TO_LONG:
+      GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
+      return;
+    case Instruction::LONG_TO_FLOAT:
+      GenConversionCall(kQuickL2f, rl_dest, rl_src, kFPReg);
+      return;
+    case Instruction::DOUBLE_TO_LONG:
+      GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
+      return;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+  if (rl_src.wide) {
+    rl_src = LoadValueWide(rl_src, kFPReg);
+  } else {
+    rl_src = LoadValue(rl_src, kFPReg);
+  }
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  if (rl_dest.wide) {
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
+void Mips64Mir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                             RegLocation rl_src2) {
+  bool wide = true;
+  QuickEntrypointEnum target;
+
+  switch (opcode) {
+    case Instruction::CMPL_FLOAT:
+      target = kQuickCmplFloat;
+      wide = false;
+      break;
+    case Instruction::CMPG_FLOAT:
+      target = kQuickCmpgFloat;
+      wide = false;
+      break;
+    case Instruction::CMPL_DOUBLE:
+      target = kQuickCmplDouble;
+      break;
+    case Instruction::CMPG_DOUBLE:
+      target = kQuickCmpgDouble;
+      break;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+      target = kQuickCmplFloat;
+  }
+  FlushAllRegs();
+  LockCallTemps();
+  if (wide) {
+    RegStorage r_tmp1(RegStorage::k64BitSolo, rMIPS64_FARG0);
+    RegStorage r_tmp2(RegStorage::k64BitSolo, rMIPS64_FARG1);
+    LoadValueDirectWideFixed(rl_src1, r_tmp1);
+    LoadValueDirectWideFixed(rl_src2, r_tmp2);
+  } else {
+    LoadValueDirectFixed(rl_src1, rs_rMIPS64_FARG0);
+    LoadValueDirectFixed(rl_src2, rs_rMIPS64_FARG1);
+  }
+  RegStorage r_tgt = LoadHelper(target);
+  // NOTE: not a safepoint.
+  OpReg(kOpBlx, r_tgt);
+  RegLocation rl_result = GetReturn(kCoreReg);
+  StoreValue(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) {
+  UNUSED(bb, mir, gt_bias, is_double);
+  UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
+}
+
+void Mips64Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
+  RegLocation rl_result;
+  rl_src = LoadValue(rl_src, kFPReg);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(kMips64Fnegs, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  StoreValue(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
+  RegLocation rl_result;
+  rl_src = LoadValueWide(rl_src, kFPReg);
+  rl_result = EvalLocWide(rl_dest, kFPReg, true);
+  NewLIR2(kMips64Fnegd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  StoreValueWide(rl_dest, rl_result);
+}
+
+bool Mips64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
+  // TODO: need Mips64 implementation.
+  UNUSED(info, is_min, is_long);
+  return false;
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/mips64/int_mips64.cc b/compiler/dex/quick/mips64/int_mips64.cc
new file mode 100644
index 0000000..9023970
--- /dev/null
+++ b/compiler/dex/quick/mips64/int_mips64.cc
@@ -0,0 +1,694 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the Mips64 ISA */
+
+#include "codegen_mips64.h"
+
+#include "base/logging.h"
+#include "dex/mir_graph.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "dex/reg_storage_eq.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "mips64_lir.h"
+#include "mirror/array-inl.h"
+
+namespace art {
+
+/*
+ * Compare two 64-bit values
+ *    x = y     return  0
+ *    x < y     return -1
+ *    x > y     return  1
+ *
+ *    slt   temp, x, y;          # (x < y) ? 1:0
+ *    slt   res, y, x;           # (x > y) ? 1:0
+ *    subu  res, res, temp;      # res = -1:1:0 for [ < > = ]
+ *
+ */
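+/*
+ * E.g. for x = 5, y = 9:  temp = (5 < 9) = 1, res = (9 < 5) = 0,
+ * so res - temp = -1, matching the "x < y" case above.
+ */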
+void Mips64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  RegStorage temp = AllocTempWide();
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  NewLIR3(kMips64Slt, temp.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+  NewLIR3(kMips64Slt, rl_result.reg.GetReg(), rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
+  NewLIR3(kMips64Subu, rl_result.reg.GetReg(), rl_result.reg.GetReg(), temp.GetReg());
+  FreeTemp(temp);
+  StoreValue(rl_dest, rl_result);
+}
+
+LIR* Mips64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
+  LIR* branch;
+  Mips64OpCode slt_op;
+  Mips64OpCode br_op;
+  bool cmp_zero = false;
+  bool swapped = false;
+  switch (cond) {
+    case kCondEq:
+      br_op = kMips64Beq;
+      cmp_zero = true;
+      break;
+    case kCondNe:
+      br_op = kMips64Bne;
+      cmp_zero = true;
+      break;
+    case kCondUlt:
+      slt_op = kMips64Sltu;
+      br_op = kMips64Bnez;
+      break;
+    case kCondUge:
+      slt_op = kMips64Sltu;
+      br_op = kMips64Beqz;
+      break;
+    case kCondGe:
+      slt_op = kMips64Slt;
+      br_op = kMips64Beqz;
+      break;
+    case kCondGt:
+      slt_op = kMips64Slt;
+      br_op = kMips64Bnez;
+      swapped = true;
+      break;
+    case kCondLe:
+      slt_op = kMips64Slt;
+      br_op = kMips64Beqz;
+      swapped = true;
+      break;
+    case kCondLt:
+      slt_op = kMips64Slt;
+      br_op = kMips64Bnez;
+      break;
+    case kCondHi:  // Gtu
+      slt_op = kMips64Sltu;
+      br_op = kMips64Bnez;
+      swapped = true;
+      break;
+    default:
+      LOG(FATAL) << "No support for ConditionCode: " << cond;
+      return NULL;
+  }
+  if (cmp_zero) {
+    branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
+  } else {
+    RegStorage t_reg = AllocTemp();
+    if (swapped) {
+      NewLIR3(slt_op, t_reg.GetReg(), src2.GetReg(), src1.GetReg());
+    } else {
+      NewLIR3(slt_op, t_reg.GetReg(), src1.GetReg(), src2.GetReg());
+    }
+    branch = NewLIR1(br_op, t_reg.GetReg());
+    FreeTemp(t_reg);
+  }
+  branch->target = target;
+  return branch;
+}
+
+LIR* Mips64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg,
+                                   int check_value, LIR* target) {
+  LIR* branch;
+  if (check_value != 0) {
+    // TUNING: handle s16 & kCondLt/Mi case using slti.
+    RegStorage t_reg = AllocTemp();
+    LoadConstant(t_reg, check_value);
+    branch = OpCmpBranch(cond, reg, t_reg, target);
+    FreeTemp(t_reg);
+    return branch;
+  }
+  Mips64OpCode opc;
+  switch (cond) {
+    case kCondEq: opc = kMips64Beqz; break;
+    case kCondGe: opc = kMips64Bgez; break;
+    case kCondGt: opc = kMips64Bgtz; break;
+    case kCondLe: opc = kMips64Blez; break;
+    // case kCondMi:
+    case kCondLt: opc = kMips64Bltz; break;
+    case kCondNe: opc = kMips64Bnez; break;
+    default:
+      // Tuning: use slti when applicable.
+      RegStorage t_reg = AllocTemp();
+      LoadConstant(t_reg, check_value);
+      branch = OpCmpBranch(cond, reg, t_reg, target);
+      FreeTemp(t_reg);
+      return branch;
+  }
+  branch = NewLIR1(opc, reg.GetReg());
+  branch->target = target;
+  return branch;
+}
+
+LIR* Mips64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
+  DCHECK(!r_dest.IsPair() && !r_src.IsPair());
+  if (r_dest.IsFloat() || r_src.IsFloat()) {
+    return OpFpRegCopy(r_dest, r_src);
+  }
+  // TODO: Check that r_src and r_dest are both 32 or both 64 bits length.
+  LIR* res;
+  if (r_dest.Is64Bit() || r_src.Is64Bit()) {
+    res = RawLIR(current_dalvik_offset_, kMips64Move, r_dest.GetReg(), r_src.GetReg());
+  } else {
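+    // A 32-bit copy is "sll rd, rs, 0", which also sign-extends on MIPS64.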
+    res = RawLIR(current_dalvik_offset_, kMips64Sll, r_dest.GetReg(), r_src.GetReg(), 0);
+  }
+  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+    res->flags.is_nop = true;
+  }
+  return res;
+}
+
+void Mips64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
+  if (r_dest != r_src) {
+    LIR *res = OpRegCopyNoInsert(r_dest, r_src);
+    AppendLIR(res);
+  }
+}
+
+void Mips64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
+  OpRegCopy(r_dest, r_src);
+}
+
+void Mips64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+                                     int32_t true_val, int32_t false_val, RegStorage rs_dest,
+                                     RegisterClass dest_reg_class) {
+  UNUSED(dest_reg_class);
+  // Implement as a branch-over.
+  // TODO: Conditional move?
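+  // Emitted sequence (a sketch):
+  //   rs_dest = true_val
+  //   if (left_op <code> right_op) goto done  ; keep true_val
+  //   rs_dest = false_val
+  // done: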
+  LoadConstant(rs_dest, true_val);
+  LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+  LoadConstant(rs_dest, false_val);
+  LIR* target_label = NewLIR0(kPseudoTargetLabel);
+  ne_branchover->target = target_label;
+}
+
+void Mips64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+  UNUSED(bb, mir);
+  UNIMPLEMENTED(FATAL) << "Need codegen for select";
+}
+
+void Mips64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
+  UNUSED(bb, mir);
+  UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
+}
+
+RegLocation Mips64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
+                                     bool is_div) {
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  NewLIR3(is_div ? kMips64Div : kMips64Mod, rl_result.reg.GetReg(), reg1.GetReg(), reg2.GetReg());
+  return rl_result;
+}
+
+RegLocation Mips64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit,
+                                        bool is_div) {
+  RegStorage t_reg = AllocTemp();
+  NewLIR3(kMips64Addiu, t_reg.GetReg(), rZERO, lit);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  NewLIR3(is_div ? kMips64Div : kMips64Mod, rl_result.reg.GetReg(), reg1.GetReg(), t_reg.GetReg());
+  FreeTemp(t_reg);
+  return rl_result;
+}
+
+RegLocation Mips64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+                                     bool is_div, int flags) {
+  UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
+  LOG(FATAL) << "Unexpected use of GenDivRem for Mips64";
+  UNREACHABLE();
+}
+
+RegLocation Mips64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+                                        bool is_div) {
+  UNUSED(rl_dest, rl_src1, lit, is_div);
+  LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips64";
+  UNREACHABLE();
+}
+
+bool Mips64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
+  UNUSED(info, is_long, is_object);
+  return false;
+}
+
+bool Mips64Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
+  UNUSED(info);
+  // TODO: add Mips64 implementation.
+  return false;
+}
+
+bool Mips64Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
+  UNUSED(info);
+  // TODO: add Mips64 implementation.
+  return false;
+}
+
+bool Mips64Mir2Lir::GenInlinedSqrt(CallInfo* info) {
+  UNUSED(info);
+  return false;
+}
+
+bool Mips64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
+  if (size != kSignedByte) {
+    // MIPS64 supports only aligned access. Defer unaligned access to JNI implementation.
+    return false;
+  }
+  RegLocation rl_src_address = info->args[0];     // Long address.
+  rl_src_address = NarrowRegLoc(rl_src_address);  // Ignore high half in info->args[1].
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  DCHECK(size == kSignedByte);
+  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
+  StoreValue(rl_dest, rl_result);
+  return true;
+}
+
+bool Mips64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
+  if (size != kSignedByte) {
+    // MIPS64 supports only aligned access. Defer unaligned access to JNI implementation.
+    return false;
+  }
+  RegLocation rl_src_address = info->args[0];     // Long address.
+  rl_src_address = NarrowRegLoc(rl_src_address);  // Ignore high half in info->args[1].
+  RegLocation rl_src_value = info->args[2];       // [size] value.
+  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
+  DCHECK(size == kSignedByte);
+  RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
+  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
+  return true;
+}
+
+LIR* Mips64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+  UNUSED(reg, target);
+  LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips64";
+  UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpVldm(RegStorage r_base, int count) {
+  UNUSED(r_base, count);
+  LOG(FATAL) << "Unexpected use of OpVldm for Mips64";
+  UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpVstm(RegStorage r_base, int count) {
+  UNUSED(r_base, count);
+  LOG(FATAL) << "Unexpected use of OpVstm for Mips64";
+  UNREACHABLE();
+}
+
+void Mips64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result,
+                                                  int lit, int first_bit, int second_bit) {
+  UNUSED(lit);
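+  // Computes rl_result = rl_src * ((1 << first_bit) + (1 << second_bit)).
+  // E.g. for lit == 10 (bits 1 and 3 set): t_reg = src << 2,
+  // result = (src + t_reg) << 1 == src * 10.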
+  RegStorage t_reg = AllocTemp();
+  OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
+  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
+  FreeTemp(t_reg);
+  if (first_bit != 0) {
+    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
+  }
+}
+
+void Mips64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
+  GenDivZeroCheck(reg);
+}
+
+// Test suspend flag, return target of taken suspend branch.
+LIR* Mips64Mir2Lir::OpTestSuspend(LIR* target) {
+  OpRegImm(kOpSub, rs_rMIPS64_SUSPEND, 1);
+  return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, rs_rMIPS64_SUSPEND, 0, target);
+}
+
+// Decrement register and branch on condition.
+LIR* Mips64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
+  OpRegImm(kOpSub, reg, 1);
+  return OpCmpImmBranch(c_code, reg, 0, target);
+}
+
+bool Mips64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
+                                       RegLocation rl_src, RegLocation rl_dest, int lit) {
+  UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
+  LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips64";
+  UNREACHABLE();
+}
+
+bool Mips64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+  UNUSED(rl_src, rl_dest, lit);
+  LOG(FATAL) << "Unexpected use of easyMultiply in Mips64";
+  UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
+  UNUSED(cond, guide);
+  LOG(FATAL) << "Unexpected use of OpIT in Mips64";
+  UNREACHABLE();
+}
+
+void Mips64Mir2Lir::OpEndIT(LIR* it) {
+  UNUSED(it);
+  LOG(FATAL) << "Unexpected use of OpEndIT in Mips64";
+}
+
+void Mips64Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_src2, int flags) {
+  switch (opcode) {
+    case Instruction::NOT_LONG:
+      GenNotLong(rl_dest, rl_src2);
+      return;
+    case Instruction::ADD_LONG:
+    case Instruction::ADD_LONG_2ADDR:
+      GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::SUB_LONG:
+    case Instruction::SUB_LONG_2ADDR:
+      GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::MUL_LONG:
+    case Instruction::MUL_LONG_2ADDR:
+      GenMulLong(rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::DIV_LONG:
+    case Instruction::DIV_LONG_2ADDR:
+      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
+      return;
+    case Instruction::REM_LONG:
+    case Instruction::REM_LONG_2ADDR:
+      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
+      return;
+    case Instruction::AND_LONG:
+    case Instruction::AND_LONG_2ADDR:
+      GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::OR_LONG:
+    case Instruction::OR_LONG_2ADDR:
+      GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::XOR_LONG:
+    case Instruction::XOR_LONG_2ADDR:
+      GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::NEG_LONG:
+      GenNegLong(rl_dest, rl_src2);
+      return;
+
+    default:
+      LOG(FATAL) << "Invalid long arith op";
+      return;
+  }
+}
+
+void Mips64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
+                              RegLocation rl_src2) {
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  OpRegReg(kOpMvn, rl_result.reg, rl_src.reg);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  NewLIR3(kMips64Dmul, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
+                                  RegLocation rl_src1, RegLocation rl_src2, bool is_div,
+                                  int flags) {
+  UNUSED(opcode);
+  // TODO: Implement easy div/rem?
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
+    GenDivZeroCheckWide(rl_src2.reg);
+  }
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  NewLIR3(is_div ? kMips64Ddiv : kMips64Dmod, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
+          rl_src2.reg.GetReg());
+  StoreValueWide(rl_dest, rl_result);
+}
+
+/*
+ * Generate array load
+ */
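+/*
+ * The effective address is array + data_offset + (index << scale); e.g. for
+ * an int[] load at index 3: reg_ptr = array + data_offset, then the indexed
+ * load applies 3 << 2.  Wide (64-bit) elements fold the scaled index into
+ * reg_ptr up front instead.
+ */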
+void Mips64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+                                RegLocation rl_index, RegLocation rl_dest, int scale) {
+  RegisterClass reg_class = RegClassBySize(size);
+  int len_offset = mirror::Array::LengthOffset().Int32Value();
+  int data_offset;
+  RegLocation rl_result;
+  rl_array = LoadValue(rl_array, kRefReg);
+  rl_index = LoadValue(rl_index, kCoreReg);
+
+  // FIXME: need to add support for rl_index.is_const.
+
+  if (size == k64 || size == kDouble) {
+    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+  } else {
+    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+  }
+
+  // Null object?
+  GenNullCheck(rl_array.reg, opt_flags);
+
+  RegStorage reg_ptr = AllocTempRef();
+  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+  RegStorage reg_len;
+  if (needs_range_check) {
+    reg_len = AllocTemp();
+    // Get len.
+    Load32Disp(rl_array.reg, len_offset, reg_len);
+  }
+  // reg_ptr -> array data.
+  OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
+  FreeTemp(rl_array.reg);
+  if ((size == k64) || (size == kDouble)) {
+    if (scale) {
+      RegStorage r_new_index = AllocTemp();
+      OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
+      OpRegReg(kOpAdd, reg_ptr, r_new_index);
+      FreeTemp(r_new_index);
+    } else {
+      OpRegReg(kOpAdd, reg_ptr, rl_index.reg);
+    }
+    FreeTemp(rl_index.reg);
+    rl_result = EvalLoc(rl_dest, reg_class, true);
+
+    if (needs_range_check) {
+      GenArrayBoundsCheck(rl_index.reg, reg_len);
+      FreeTemp(reg_len);
+    }
+    LoadBaseDisp(reg_ptr, 0, rl_result.reg, size, kNotVolatile);
+
+    FreeTemp(reg_ptr);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    rl_result = EvalLoc(rl_dest, reg_class, true);
+
+    if (needs_range_check) {
+      GenArrayBoundsCheck(rl_index.reg, reg_len);
+      FreeTemp(reg_len);
+    }
+    if (rl_result.ref) {
+      LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), As32BitReg(rl_result.reg), scale,
+                      kReference);
+    } else {
+      LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
+    }
+
+    FreeTemp(reg_ptr);
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
+/*
+ * Generate array store
+ *
+ */
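+/*
+ * Mirrors GenArrayGet: reg_ptr is advanced to the array data, the scaled
+ * index is applied (folded into reg_ptr for wide values), and a store is
+ * emitted; reference stores additionally dirty the GC card for the array.
+ */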
+void Mips64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+                                RegLocation rl_index, RegLocation rl_src, int scale,
+                                bool card_mark) {
+  RegisterClass reg_class = RegClassBySize(size);
+  int len_offset = mirror::Array::LengthOffset().Int32Value();
+  int data_offset;
+
+  if (size == k64 || size == kDouble) {
+    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+  } else {
+    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+  }
+
+  rl_array = LoadValue(rl_array, kRefReg);
+  rl_index = LoadValue(rl_index, kCoreReg);
+
+  // FIXME: need to add support for rl_index.is_const.
+
+  RegStorage reg_ptr;
+  bool allocated_reg_ptr_temp = false;
+  if (IsTemp(rl_array.reg) && !card_mark) {
+    Clobber(rl_array.reg);
+    reg_ptr = rl_array.reg;
+  } else {
+    reg_ptr = AllocTemp();
+    OpRegCopy(reg_ptr, rl_array.reg);
+    allocated_reg_ptr_temp = true;
+  }
+
+  // Null object?
+  GenNullCheck(rl_array.reg, opt_flags);
+
+  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+  RegStorage reg_len;
+  if (needs_range_check) {
+    reg_len = AllocTemp();
+    // NOTE: max live temps(4) here.
+    // Get len.
+    Load32Disp(rl_array.reg, len_offset, reg_len);
+  }
+  // reg_ptr -> array data.
+  OpRegImm(kOpAdd, reg_ptr, data_offset);
+  // At this point, reg_ptr points to array, 2 live temps.
+  if ((size == k64) || (size == kDouble)) {
+    // TUNING: specific wide routine that can handle fp regs.
+    if (scale) {
+      RegStorage r_new_index = AllocTemp();
+      OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
+      OpRegReg(kOpAdd, reg_ptr, r_new_index);
+      FreeTemp(r_new_index);
+    } else {
+      OpRegReg(kOpAdd, reg_ptr, rl_index.reg);
+    }
+    rl_src = LoadValueWide(rl_src, reg_class);
+
+    if (needs_range_check) {
+      GenArrayBoundsCheck(rl_index.reg, reg_len);
+      FreeTemp(reg_len);
+    }
+
+    StoreBaseDisp(reg_ptr, 0, rl_src.reg, size, kNotVolatile);
+  } else {
+    rl_src = LoadValue(rl_src, reg_class);
+    if (needs_range_check) {
+      GenArrayBoundsCheck(rl_index.reg, reg_len);
+      FreeTemp(reg_len);
+    }
+    StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
+  }
+  if (allocated_reg_ptr_temp) {
+    FreeTemp(reg_ptr);
+  }
+  if (card_mark) {
+    MarkGCCard(opt_flags, rl_src.reg, rl_array.reg);
+  }
+}
+
+void Mips64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_shift) {
+  OpKind op = kOpBkpt;
+  switch (opcode) {
+  case Instruction::SHL_LONG:
+  case Instruction::SHL_LONG_2ADDR:
+    op = kOpLsl;
+    break;
+  case Instruction::SHR_LONG:
+  case Instruction::SHR_LONG_2ADDR:
+    op = kOpAsr;
+    break;
+  case Instruction::USHR_LONG:
+  case Instruction::USHR_LONG_2ADDR:
+    op = kOpLsr;
+    break;
+  default:
+    LOG(FATAL) << "Unexpected case: " << opcode;
+  }
+  rl_shift = LoadValue(rl_shift, kCoreReg);
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
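+  // The MIPS64 variable shifts (dsllv/dsrlv/dsrav) read the shift amount
+  // from a GPR and use only its low six bits, so the 32-bit shift value is
+  // safe to consume through its 64-bit view.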
+  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                      RegLocation rl_src1, RegLocation rl_shift, int flags) {
+  UNUSED(flags);
+  OpKind op = kOpBkpt;
+  // Per spec, we only care about the low 6 bits of the shift amount.
+  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
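+  // E.g. a constant shift of 65 is masked down to 65 & 0x3f == 1.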
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  if (shift_amount == 0) {
+    StoreValueWide(rl_dest, rl_src1);
+    return;
+  }
+
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+  switch (opcode) {
+    case Instruction::SHL_LONG:
+    case Instruction::SHL_LONG_2ADDR:
+      op = kOpLsl;
+      break;
+    case Instruction::SHR_LONG:
+    case Instruction::SHR_LONG_2ADDR:
+      op = kOpAsr;
+      break;
+    case Instruction::USHR_LONG:
+    case Instruction::USHR_LONG_2ADDR:
+      op = kOpLsr;
+      break;
+    default:
+      LOG(FATAL) << "Unexpected case";
+  }
+  OpRegRegImm(op, rl_result.reg, rl_src1.reg, shift_amount);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                      RegLocation rl_src1, RegLocation rl_src2, int flags) {
+  // Default - bail to non-const handler.
+  GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
+}
+
+void Mips64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
+  rl_src = LoadValue(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
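+  // On MIPS64, a 32-bit SLL with shift amount 0 sign-extends its result
+  // into the full 64-bit register, which is exactly the int-to-long
+  // widening needed here.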
+  NewLIR3(kMips64Sll, rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
+                                      RegLocation rl_src, RegisterClass reg_class) {
+  FlushAllRegs();   // Send everything to home location.
+  CallRuntimeHelperRegLocation(trampoline, rl_src, false);
+  if (rl_dest.wide) {
+    RegLocation rl_result = GetReturnWide(reg_class);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    RegLocation rl_result = GetReturn(reg_class);
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/mips64/mips64_lir.h b/compiler/dex/quick/mips64/mips64_lir.h
new file mode 100644
index 0000000..4a5c5ce
--- /dev/null
+++ b/compiler/dex/quick/mips64/mips64_lir.h
@@ -0,0 +1,648 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
+#define ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
+
+#include "dex/reg_location.h"
+#include "dex/reg_storage.h"
+
+namespace art {
+
+/*
+ * Runtime register conventions.
+ *
+ * zero is always the value 0
+ * at is scratch (normally used as temp reg by assembler)
+ * v0, v1 are scratch (normally hold subroutine return values)
+ * a0-a7 are scratch (normally hold subroutine arguments)
+ * t0-t3, t8 are scratch
+ * t9 is scratch (normally used for function calls)
+ * s0 (rMIPS64_SUSPEND) is reserved [holds suspend-check counter]
+ * s1 (rMIPS64_SELF) is reserved [holds current &Thread]
+ * s2-s7 are callee save (promotion target)
+ * k0, k1 are reserved for use by interrupt handlers
+ * gp is reserved for global pointer
+ * sp is reserved
+ * s8 is callee save (promotion target)
+ * ra is scratch (normally holds the return addr)
+ *
+ * Preserved across C calls: s0-s8
+ * Trashed across C calls: at, v0-v1, a0-a7, t0-t3, t8-t9, gp, ra
+ *
+ * Floating point registers
+ * NOTE: there are 32 fp registers.
+ * f0-f31
+ *
+ * f0-f31 trashed across C calls
+ *
+ * For mips64 code use:
+ *      a0-a7 to hold operands
+ *      v0-v1 to hold results
+ *      t0-t3, t8-t9 for temps
+ *
+ * All jump/branch instructions are followed by a delay slot.
+ *
+ *  Stack frame diagram (stack grows down, higher addresses at top):
+ *
+ * +------------------------+
+ * | IN[ins-1]              |  {Note: resides in caller's frame}
+ * |       .                |
+ * | IN[0]                  |
+ * | caller's Method*       |
+ * +========================+  {Note: start of callee's frame}
+ * | spill region           |  {variable sized - will include ra if non-leaf.}
+ * +------------------------+
+ * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long}
+ * +------------------------+
+ * | V[locals-1]            |
+ * | V[locals-2]            |
+ * |      .                 |
+ * |      .                 |
+ * | V[1]                   |
+ * | V[0]                   |
+ * +------------------------+
+ * |  0 to 3 words padding  |
+ * +------------------------+
+ * | OUT[outs-1]            |
+ * | OUT[outs-2]            |
+ * |       .                |
+ * | OUT[0]                 |
+ * | cur_method*            | <<== sp w/ 16-byte alignment
+ * +========================+
+ */
+
+
+#define rARG0 rA0d
+#define rs_rARG0 rs_rA0d
+#define rARG1 rA1d
+#define rs_rARG1 rs_rA1d
+#define rARG2 rA2d
+#define rs_rARG2 rs_rA2d
+#define rARG3 rA3d
+#define rs_rARG3 rs_rA3d
+#define rARG4 rA4d
+#define rs_rARG4 rs_rA4d
+#define rARG5 rA5d
+#define rs_rARG5 rs_rA5d
+#define rARG6 rA6d
+#define rs_rARG6 rs_rA6d
+#define rARG7 rA7d
+#define rs_rARG7 rs_rA7d
+#define rRESULT0 rV0d
+#define rs_rRESULT0 rs_rV0d
+#define rRESULT1 rV1d
+#define rs_rRESULT1 rs_rV1d
+
+#define rFARG0 rF12
+#define rs_rFARG0 rs_rF12
+#define rFARG1 rF13
+#define rs_rFARG1 rs_rF13
+#define rFARG2 rF14
+#define rs_rFARG2 rs_rF14
+#define rFARG3 rF15
+#define rs_rFARG3 rs_rF15
+#define rFARG4 rF16
+#define rs_rFARG4 rs_rF16
+#define rFARG5 rF17
+#define rs_rFARG5 rs_rF17
+#define rFARG6 rF18
+#define rs_rFARG6 rs_rF18
+#define rFARG7 rF19
+#define rs_rFARG7 rs_rF19
+#define rFRESULT0 rF0
+#define rs_rFRESULT0 rs_rF0
+#define rFRESULT1 rF1
+#define rs_rFRESULT1 rs_rF1
+
+// Regs not used for Mips64.
+#define rMIPS64_LR RegStorage::kInvalidRegVal
+#define rMIPS64_PC RegStorage::kInvalidRegVal
+
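+// Resource mask bit positions: GPRs occupy bits [0..31], FPRs bits
+// [32..63], and the PC pseudo-resource takes bit 64 (see GetRegMaskCommon()
+// and GetPCUseDefEncoding() in target_mips64.cc).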
+enum Mips64ResourceEncodingPos {
+  kMips64GPReg0   = 0,
+  kMips64RegSP    = 29,
+  kMips64RegLR    = 31,
+  kMips64FPReg0   = 32,
+  kMips64FPRegEnd = 64,
+  kMips64RegPC    = kMips64FPRegEnd,
+  kMips64RegEnd   = 65,
+};
+
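+// Each enumerator below ORs together a storage shape, a register class and
+// the hardware register number; e.g. rA0d == k64BitSolo | kCoreRegister | 4
+// names the full 64-bit view of $a0.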
+enum Mips64NativeRegisterPool {  // Private marker to keep generate-operator-out.py from processing.
+  rZERO  = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  0,
+  rZEROd = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  0,
+  rAT    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  1,
+  rATd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  1,
+  rV0    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  2,
+  rV0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  2,
+  rV1    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  3,
+  rV1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  3,
+  rA0    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  4,
+  rA0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  4,
+  rA1    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  5,
+  rA1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  5,
+  rA2    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  6,
+  rA2d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  6,
+  rA3    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  7,
+  rA3d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  7,
+  rA4    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  8,
+  rA4d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  8,
+  rA5    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  9,
+  rA5d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  9,
+  rA6    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
+  rA6d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 10,
+  rA7    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
+  rA7d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 11,
+  rT0    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
+  rT0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 12,
+  rT1    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
+  rT1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 13,
+  rT2    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
+  rT2d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 14,
+  rT3    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
+  rT3d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 15,
+  rS0    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
+  rS0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 16,
+  rS1    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 17,
+  rS1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 17,
+  rS2    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 18,
+  rS2d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 18,
+  rS3    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 19,
+  rS3d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 19,
+  rS4    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 20,
+  rS4d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 20,
+  rS5    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 21,
+  rS5d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 21,
+  rS6    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 22,
+  rS6d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 22,
+  rS7    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 23,
+  rS7d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 23,
+  rT8    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 24,
+  rT8d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 24,
+  rT9    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 25,
+  rT9d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 25,
+  rK0    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 26,
+  rK0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 26,
+  rK1    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 27,
+  rK1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 27,
+  rGP    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 28,
+  rGPd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 28,
+  rSP    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 29,
+  rSPd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 29,
+  rFP    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 30,
+  rFPd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 30,
+  rRA    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 31,
+  rRAd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 31,
+
+  rF0  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  0,
+  rF1  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  1,
+  rF2  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  2,
+  rF3  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  3,
+  rF4  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  4,
+  rF5  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  5,
+  rF6  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  6,
+  rF7  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  7,
+  rF8  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  8,
+  rF9  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  9,
+  rF10 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 10,
+  rF11 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 11,
+  rF12 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 12,
+  rF13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
+  rF14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
+  rF15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,
+  rF16 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 16,
+  rF17 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 17,
+  rF18 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 18,
+  rF19 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 19,
+  rF20 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 20,
+  rF21 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 21,
+  rF22 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 22,
+  rF23 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 23,
+  rF24 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 24,
+  rF25 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 25,
+  rF26 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 26,
+  rF27 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 27,
+  rF28 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 28,
+  rF29 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 29,
+  rF30 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 30,
+  rF31 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 31,
+
+  rD0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  0,
+  rD1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  1,
+  rD2  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  2,
+  rD3  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  3,
+  rD4  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  4,
+  rD5  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  5,
+  rD6  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  6,
+  rD7  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  7,
+  rD8  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  8,
+  rD9  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  9,
+  rD10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
+  rD11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
+  rD12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
+  rD13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
+  rD14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
+  rD15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
+  rD16 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
+  rD17 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 17,
+  rD18 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
+  rD19 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 19,
+  rD20 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
+  rD21 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 21,
+  rD22 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
+  rD23 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 23,
+  rD24 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
+  rD25 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 25,
+  rD26 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
+  rD27 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 27,
+  rD28 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
+  rD29 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 29,
+  rD30 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
+  rD31 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 31,
+};
+
+constexpr RegStorage rs_rZERO(RegStorage::kValid | rZERO);
+constexpr RegStorage rs_rZEROd(RegStorage::kValid | rZEROd);
+constexpr RegStorage rs_rAT(RegStorage::kValid | rAT);
+constexpr RegStorage rs_rATd(RegStorage::kValid | rATd);
+constexpr RegStorage rs_rV0(RegStorage::kValid | rV0);
+constexpr RegStorage rs_rV0d(RegStorage::kValid | rV0d);
+constexpr RegStorage rs_rV1(RegStorage::kValid | rV1);
+constexpr RegStorage rs_rV1d(RegStorage::kValid | rV1d);
+constexpr RegStorage rs_rA0(RegStorage::kValid | rA0);
+constexpr RegStorage rs_rA0d(RegStorage::kValid | rA0d);
+constexpr RegStorage rs_rA1(RegStorage::kValid | rA1);
+constexpr RegStorage rs_rA1d(RegStorage::kValid | rA1d);
+constexpr RegStorage rs_rA2(RegStorage::kValid | rA2);
+constexpr RegStorage rs_rA2d(RegStorage::kValid | rA2d);
+constexpr RegStorage rs_rA3(RegStorage::kValid | rA3);
+constexpr RegStorage rs_rA3d(RegStorage::kValid | rA3d);
+constexpr RegStorage rs_rA4(RegStorage::kValid | rA4);
+constexpr RegStorage rs_rA4d(RegStorage::kValid | rA4d);
+constexpr RegStorage rs_rA5(RegStorage::kValid | rA5);
+constexpr RegStorage rs_rA5d(RegStorage::kValid | rA5d);
+constexpr RegStorage rs_rA6(RegStorage::kValid | rA6);
+constexpr RegStorage rs_rA6d(RegStorage::kValid | rA6d);
+constexpr RegStorage rs_rA7(RegStorage::kValid | rA7);
+constexpr RegStorage rs_rA7d(RegStorage::kValid | rA7d);
+constexpr RegStorage rs_rT0(RegStorage::kValid | rT0);
+constexpr RegStorage rs_rT0d(RegStorage::kValid | rT0d);
+constexpr RegStorage rs_rT1(RegStorage::kValid | rT1);
+constexpr RegStorage rs_rT1d(RegStorage::kValid | rT1d);
+constexpr RegStorage rs_rT2(RegStorage::kValid | rT2);
+constexpr RegStorage rs_rT2d(RegStorage::kValid | rT2d);
+constexpr RegStorage rs_rT3(RegStorage::kValid | rT3);
+constexpr RegStorage rs_rT3d(RegStorage::kValid | rT3d);
+constexpr RegStorage rs_rS0(RegStorage::kValid | rS0);
+constexpr RegStorage rs_rS0d(RegStorage::kValid | rS0d);
+constexpr RegStorage rs_rS1(RegStorage::kValid | rS1);
+constexpr RegStorage rs_rS1d(RegStorage::kValid | rS1d);
+constexpr RegStorage rs_rS2(RegStorage::kValid | rS2);
+constexpr RegStorage rs_rS2d(RegStorage::kValid | rS2d);
+constexpr RegStorage rs_rS3(RegStorage::kValid | rS3);
+constexpr RegStorage rs_rS3d(RegStorage::kValid | rS3d);
+constexpr RegStorage rs_rS4(RegStorage::kValid | rS4);
+constexpr RegStorage rs_rS4d(RegStorage::kValid | rS4d);
+constexpr RegStorage rs_rS5(RegStorage::kValid | rS5);
+constexpr RegStorage rs_rS5d(RegStorage::kValid | rS5d);
+constexpr RegStorage rs_rS6(RegStorage::kValid | rS6);
+constexpr RegStorage rs_rS6d(RegStorage::kValid | rS6d);
+constexpr RegStorage rs_rS7(RegStorage::kValid | rS7);
+constexpr RegStorage rs_rS7d(RegStorage::kValid | rS7d);
+constexpr RegStorage rs_rT8(RegStorage::kValid | rT8);
+constexpr RegStorage rs_rT8d(RegStorage::kValid | rT8d);
+constexpr RegStorage rs_rT9(RegStorage::kValid | rT9);
+constexpr RegStorage rs_rT9d(RegStorage::kValid | rT9d);
+constexpr RegStorage rs_rK0(RegStorage::kValid | rK0);
+constexpr RegStorage rs_rK0d(RegStorage::kValid | rK0d);
+constexpr RegStorage rs_rK1(RegStorage::kValid | rK1);
+constexpr RegStorage rs_rK1d(RegStorage::kValid | rK1d);
+constexpr RegStorage rs_rGP(RegStorage::kValid | rGP);
+constexpr RegStorage rs_rGPd(RegStorage::kValid | rGPd);
+constexpr RegStorage rs_rSP(RegStorage::kValid | rSP);
+constexpr RegStorage rs_rSPd(RegStorage::kValid | rSPd);
+constexpr RegStorage rs_rFP(RegStorage::kValid | rFP);
+constexpr RegStorage rs_rFPd(RegStorage::kValid | rFPd);
+constexpr RegStorage rs_rRA(RegStorage::kValid | rRA);
+constexpr RegStorage rs_rRAd(RegStorage::kValid | rRAd);
+
+constexpr RegStorage rs_rMIPS64_LR(RegStorage::kInvalid);     // Not used for MIPS64.
+constexpr RegStorage rs_rMIPS64_PC(RegStorage::kInvalid);     // Not used for MIPS64.
+constexpr RegStorage rs_rMIPS64_COUNT(RegStorage::kInvalid);  // Not used for MIPS64.
+
+constexpr RegStorage rs_rF0(RegStorage::kValid | rF0);
+constexpr RegStorage rs_rF1(RegStorage::kValid | rF1);
+constexpr RegStorage rs_rF2(RegStorage::kValid | rF2);
+constexpr RegStorage rs_rF3(RegStorage::kValid | rF3);
+constexpr RegStorage rs_rF4(RegStorage::kValid | rF4);
+constexpr RegStorage rs_rF5(RegStorage::kValid | rF5);
+constexpr RegStorage rs_rF6(RegStorage::kValid | rF6);
+constexpr RegStorage rs_rF7(RegStorage::kValid | rF7);
+constexpr RegStorage rs_rF8(RegStorage::kValid | rF8);
+constexpr RegStorage rs_rF9(RegStorage::kValid | rF9);
+constexpr RegStorage rs_rF10(RegStorage::kValid | rF10);
+constexpr RegStorage rs_rF11(RegStorage::kValid | rF11);
+constexpr RegStorage rs_rF12(RegStorage::kValid | rF12);
+constexpr RegStorage rs_rF13(RegStorage::kValid | rF13);
+constexpr RegStorage rs_rF14(RegStorage::kValid | rF14);
+constexpr RegStorage rs_rF15(RegStorage::kValid | rF15);
+constexpr RegStorage rs_rF16(RegStorage::kValid | rF16);
+constexpr RegStorage rs_rF17(RegStorage::kValid | rF17);
+constexpr RegStorage rs_rF18(RegStorage::kValid | rF18);
+constexpr RegStorage rs_rF19(RegStorage::kValid | rF19);
+constexpr RegStorage rs_rF20(RegStorage::kValid | rF20);
+constexpr RegStorage rs_rF21(RegStorage::kValid | rF21);
+constexpr RegStorage rs_rF22(RegStorage::kValid | rF22);
+constexpr RegStorage rs_rF23(RegStorage::kValid | rF23);
+constexpr RegStorage rs_rF24(RegStorage::kValid | rF24);
+constexpr RegStorage rs_rF25(RegStorage::kValid | rF25);
+constexpr RegStorage rs_rF26(RegStorage::kValid | rF26);
+constexpr RegStorage rs_rF27(RegStorage::kValid | rF27);
+constexpr RegStorage rs_rF28(RegStorage::kValid | rF28);
+constexpr RegStorage rs_rF29(RegStorage::kValid | rF29);
+constexpr RegStorage rs_rF30(RegStorage::kValid | rF30);
+constexpr RegStorage rs_rF31(RegStorage::kValid | rF31);
+
+constexpr RegStorage rs_rD0(RegStorage::kValid | rD0);
+constexpr RegStorage rs_rD1(RegStorage::kValid | rD1);
+constexpr RegStorage rs_rD2(RegStorage::kValid | rD2);
+constexpr RegStorage rs_rD3(RegStorage::kValid | rD3);
+constexpr RegStorage rs_rD4(RegStorage::kValid | rD4);
+constexpr RegStorage rs_rD5(RegStorage::kValid | rD5);
+constexpr RegStorage rs_rD6(RegStorage::kValid | rD6);
+constexpr RegStorage rs_rD7(RegStorage::kValid | rD7);
+constexpr RegStorage rs_rD8(RegStorage::kValid | rD8);
+constexpr RegStorage rs_rD9(RegStorage::kValid | rD9);
+constexpr RegStorage rs_rD10(RegStorage::kValid | rD10);
+constexpr RegStorage rs_rD11(RegStorage::kValid | rD11);
+constexpr RegStorage rs_rD12(RegStorage::kValid | rD12);
+constexpr RegStorage rs_rD13(RegStorage::kValid | rD13);
+constexpr RegStorage rs_rD14(RegStorage::kValid | rD14);
+constexpr RegStorage rs_rD15(RegStorage::kValid | rD15);
+constexpr RegStorage rs_rD16(RegStorage::kValid | rD16);
+constexpr RegStorage rs_rD17(RegStorage::kValid | rD17);
+constexpr RegStorage rs_rD18(RegStorage::kValid | rD18);
+constexpr RegStorage rs_rD19(RegStorage::kValid | rD19);
+constexpr RegStorage rs_rD20(RegStorage::kValid | rD20);
+constexpr RegStorage rs_rD21(RegStorage::kValid | rD21);
+constexpr RegStorage rs_rD22(RegStorage::kValid | rD22);
+constexpr RegStorage rs_rD23(RegStorage::kValid | rD23);
+constexpr RegStorage rs_rD24(RegStorage::kValid | rD24);
+constexpr RegStorage rs_rD25(RegStorage::kValid | rD25);
+constexpr RegStorage rs_rD26(RegStorage::kValid | rD26);
+constexpr RegStorage rs_rD27(RegStorage::kValid | rD27);
+constexpr RegStorage rs_rD28(RegStorage::kValid | rD28);
+constexpr RegStorage rs_rD29(RegStorage::kValid | rD29);
+constexpr RegStorage rs_rD30(RegStorage::kValid | rD30);
+constexpr RegStorage rs_rD31(RegStorage::kValid | rD31);
+
+// TODO: reduce/eliminate use of these.
+#define rMIPS64_SUSPEND rS0d
+#define rs_rMIPS64_SUSPEND rs_rS0d
+#define rMIPS64_SELF rS1d
+#define rs_rMIPS64_SELF rs_rS1d
+#define rMIPS64_SP rSPd
+#define rs_rMIPS64_SP rs_rSPd
+#define rMIPS64_ARG0 rARG0
+#define rs_rMIPS64_ARG0 rs_rARG0
+#define rMIPS64_ARG1 rARG1
+#define rs_rMIPS64_ARG1 rs_rARG1
+#define rMIPS64_ARG2 rARG2
+#define rs_rMIPS64_ARG2 rs_rARG2
+#define rMIPS64_ARG3 rARG3
+#define rs_rMIPS64_ARG3 rs_rARG3
+#define rMIPS64_ARG4 rARG4
+#define rs_rMIPS64_ARG4 rs_rARG4
+#define rMIPS64_ARG5 rARG5
+#define rs_rMIPS64_ARG5 rs_rARG5
+#define rMIPS64_ARG6 rARG6
+#define rs_rMIPS64_ARG6 rs_rARG6
+#define rMIPS64_ARG7 rARG7
+#define rs_rMIPS64_ARG7 rs_rARG7
+#define rMIPS64_FARG0 rFARG0
+#define rs_rMIPS64_FARG0 rs_rFARG0
+#define rMIPS64_FARG1 rFARG1
+#define rs_rMIPS64_FARG1 rs_rFARG1
+#define rMIPS64_FARG2 rFARG2
+#define rs_rMIPS64_FARG2 rs_rFARG2
+#define rMIPS64_FARG3 rFARG3
+#define rs_rMIPS64_FARG3 rs_rFARG3
+#define rMIPS64_FARG4 rFARG4
+#define rs_rMIPS64_FARG4 rs_rFARG4
+#define rMIPS64_FARG5 rFARG5
+#define rs_rMIPS64_FARG5 rs_rFARG5
+#define rMIPS64_FARG6 rFARG6
+#define rs_rMIPS64_FARG6 rs_rFARG6
+#define rMIPS64_FARG7 rFARG7
+#define rs_rMIPS64_FARG7 rs_rFARG7
+#define rMIPS64_RET0 rRESULT0
+#define rs_rMIPS64_RET0 rs_rRESULT0
+#define rMIPS64_RET1 rRESULT1
+#define rs_rMIPS64_RET1 rs_rRESULT1
+#define rMIPS64_INVOKE_TGT rT9d
+#define rs_rMIPS64_INVOKE_TGT rs_rT9d
+#define rMIPS64_COUNT RegStorage::kInvalidRegVal
+
+// RegisterLocation templates for return values (r_V0).
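+// Initializer order follows RegLocation (dex/reg_location.h): {location,
+// wide, defined, is_const, fp, core, ref, high_word, home, reg, s_reg_low,
+// orig_sreg}.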
+const RegLocation mips64_loc_c_return
+    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
+     RegStorage(RegStorage::k32BitSolo, rV0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_ref
+    {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
+     RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_wide
+    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
+     RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_float
+    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
+     RegStorage(RegStorage::k32BitSolo, rF0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_double
+    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
+     RegStorage(RegStorage::k64BitSolo, rD0), INVALID_SREG, INVALID_SREG};
+
+enum Mips64ShiftEncodings {
+  kMips64Lsl = 0x0,
+  kMips64Lsr = 0x1,
+  kMips64Asr = 0x2,
+  kMips64Ror = 0x3
+};
+
+// MIPS64 sync kinds (Note: support for kinds other than kSYNC0 may not exist).
+#define kSYNC0        0x00
+#define kSYNC_WMB     0x04
+#define kSYNC_MB      0x01
+#define kSYNC_ACQUIRE 0x11
+#define kSYNC_RELEASE 0x12
+#define kSYNC_RMB     0x13
+
+// TODO: Use smaller hammer when appropriate for target CPU.
+#define kST kSYNC0
+#define kSY kSYNC0
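+// A full barrier can then be emitted as NewLIR1(kMips64Sync, kSY); see
+// GenMemBarrier() in target_mips64.cc, which currently always emits stype 0.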
+
+/*
+ * The following enum defines the list of supported Mips64 instructions by the
+ * assembler. Their corresponding EncodingMap positions will be defined in
+ * assemble_mips64.cc.
+ */
+enum Mips64OpCode {
+  kMips64First = 0,
+  kMips6432BitData = kMips64First,  // data [31..0].
+  kMips64Addiu,      // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+  kMips64Addu,       // add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
+  kMips64And,        // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
+  kMips64Andi,       // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
+  kMips64B,          // b o   [0001000000000000] o[15..0].
+  kMips64Bal,        // bal o [0000010000010001] o[15..0].
+  // NOTE: the code tests the range kMips64Beq thru kMips64Bne, so adding an instruction in this
+  //       range may require updates.
+  kMips64Beq,        // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
+  kMips64Beqz,       // beqz s,o [000100] s[25..21] [00000] o[15..0].
+  kMips64Bgez,       // bgez s,o [000001] s[25..21] [00001] o[15..0].
+  kMips64Bgtz,       // bgtz s,o [000111] s[25..21] [00000] o[15..0].
+  kMips64Blez,       // blez s,o [000110] s[25..21] [00000] o[15..0].
+  kMips64Bltz,       // bltz s,o [000001] s[25..21] [00000] o[15..0].
+  kMips64Bnez,       // bnez s,o [000101] s[25..21] [00000] o[15..0].
+  kMips64Bne,        // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
+  kMips64Break,      // break code [000000] code[25..6] [001101].
+  kMips64Daddiu,     // daddiu t,s,imm16 [011001] s[25..21] t[20..16] imm16[15..0].
+  kMips64Daddu,      // daddu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101101].
+  kMips64Dahi,       // dahi s,imm16 [000001] s[25..21] [00110] imm16[15..0].
+  kMips64Dati,       // dati s,imm16 [000001] s[25..21] [11110] imm16[15..0].
+  kMips64Daui,       // daui t,s,imm16 [011101] s[25..21] t[20..16] imm16[15..0].
+  kMips64Ddiv,       // ddiv  d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011110].
+  kMips64Div,        // div   d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011010].
+  kMips64Dmod,       // dmod  d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011110].
+  kMips64Dmul,       // dmul  d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011100].
+  kMips64Dmfc1,      // dmfc1 t,s [01000100001] t[20..16] s[15..11] [00000000000].
+  kMips64Dmtc1,      // dmtc1 t,s [01000100101] t[20..16] s[15..11] [00000000000].
+  kMips64Drotr32,    // drotr32 d,t,a [00000000001] t[20..16] d[15..11] a[10..6] [111110].
+  kMips64Dsll,       // dsll    d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111000].
+  kMips64Dsll32,     // dsll32  d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111100].
+  kMips64Dsrl,       // dsrl    d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111010].
+  kMips64Dsrl32,     // dsrl32  d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111110].
+  kMips64Dsra,       // dsra    d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111011].
+  kMips64Dsra32,     // dsra32  d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111111].
+  kMips64Dsllv,      // dsllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010100].
+  kMips64Dsrlv,      // dsrlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010110].
+  kMips64Dsrav,      // dsrav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010111].
+  kMips64Dsubu,      // dsubu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101111].
+  kMips64Ext,        // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
+  kMips64Faddd,      // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
+  kMips64Fadds,      // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
+  kMips64Fdivd,      // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
+  kMips64Fdivs,      // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
+  kMips64Fmuld,      // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
+  kMips64Fmuls,      // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
+  kMips64Fsubd,      // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
+  kMips64Fsubs,      // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
+  kMips64Fcvtsd,     // cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
+  kMips64Fcvtsw,     // cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
+  kMips64Fcvtds,     // cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
+  kMips64Fcvtdw,     // cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
+  kMips64Fcvtws,     // cvt.w.s d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
+  kMips64Fcvtwd,     // cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
+  kMips64Fmovd,      // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
+  kMips64Fmovs,      // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
+  kMips64Fnegd,      // neg.d d,s [01000110001] [00000] s[15..11] d[10..6] [000111].
+  kMips64Fnegs,      // neg.s d,s [01000110000] [00000] s[15..11] d[10..6] [000111].
+  kMips64Fldc1,      // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
+  kMips64Flwc1,      // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
+  kMips64Fsdc1,      // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
+  kMips64Fswc1,      // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
+  kMips64Jal,        // jal t [000011] t[25..0].
+  kMips64Jalr,       // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
+  kMips64Lahi,       // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
+  kMips64Lalo,       // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo.
+  kMips64Lb,         // lb  t,o(b) [100000] b[25..21] t[20..16] o[15..0].
+  kMips64Lbu,        // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
+  kMips64Ld,         // ld  t,o(b) [110111] b[25..21] t[20..16] o[15..0].
+  kMips64Lh,         // lh  t,o(b) [100001] b[25..21] t[20..16] o[15..0].
+  kMips64Lhu,        // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
+  kMips64Lui,        // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
+  kMips64Lw,         // lw  t,o(b) [100011] b[25..21] t[20..16] o[15..0].
+  kMips64Lwu,        // lwu t,o(b) [100111] b[25..21] t[20..16] o[15..0].
+  kMips64Mfc1,       // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
+  kMips64Mtc1,       // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
+  kMips64Move,       // move d,s [000000] s[25..21] [00000] d[15..11] [00000101101].
+  kMips64Mod,        // mod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011010].
+  kMips64Mul,        // mul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011000].
+  kMips64Nop,        // nop [00000000000000000000000000000000].
+  kMips64Nor,        // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
+  kMips64Or,         // or  d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
+  kMips64Ori,        // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+  kMips64Sb,         // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
+  kMips64Sd,         // sd t,o(b) [111111] b[25..21] t[20..16] o[15..0].
+  kMips64Seb,        // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
+  kMips64Seh,        // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
+  kMips64Sh,         // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
+  kMips64Sll,        // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
+  kMips64Sllv,       // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
+  kMips64Slt,        // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
+  kMips64Slti,       // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
+  kMips64Sltu,       // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
+  kMips64Sra,        // sra d,s,imm5 [00000000000] t[20..16] d[15..11] imm5[10..6] [000011].
+  kMips64Srav,       // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
+  kMips64Srl,        // srl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000010].
+  kMips64Srlv,       // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
+  kMips64Subu,       // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
+  kMips64Sw,         // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
+  kMips64Sync,       // sync kind [000000] [0000000000000000] s[10..6] [001111].
+  kMips64Xor,        // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
+  kMips64Xori,       // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
+  kMips64CurrPC,     // jal to .+8 to materialize pc.
+  kMips64Delta,      // Pseudo for ori t, s, <label>-<label>.
+  kMips64DeltaHi,    // Pseudo for lui t, high16(<label>-<label>).
+  kMips64DeltaLo,    // Pseudo for ori t, s, low16(<label>-<label>).
+  kMips64Undefined,  // undefined [011001xxxxxxxxxxxxxxxx].
+  kMips64Last
+};
+std::ostream& operator<<(std::ostream& os, const Mips64OpCode& rhs);
+
+// Instruction assembly field_loc kind.
+enum Mips64EncodingKind {
+  kFmtUnused,
+  kFmtBitBlt,    // Bit string using end/start.
+  kFmtDfp,       // Double FP reg.
+  kFmtSfp,       // Single FP reg.
+  kFmtBlt5_2,    // Same 5-bit field to 2 locations.
+};
+std::ostream& operator<<(std::ostream& os, const Mips64EncodingKind& rhs);
+
+// Struct used to define the snippet positions for each MIPS64 opcode.
+struct Mips64EncodingMap {
+  uint32_t skeleton;
+  struct {
+    Mips64EncodingKind kind;
+    int end;   // end for kFmtBitBlt, 1-bit slice end for FP regs.
+    int start;  // start for kFmtBitBlt, 4-bit slice end for FP regs.
+  } field_loc[4];
+  Mips64OpCode opcode;
+  uint64_t flags;
+  const char *name;
+  const char* fmt;
+  int size;   // Note: size is in bytes.
+};
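+// For illustration (simplified), the addiu entry in assemble_mips64.cc is
+// expected to encode roughly as: skeleton 0x24000000, field_loc
+// {{kFmtBitBlt, 20, 16}, {kFmtBitBlt, 25, 21}, {kFmtBitBlt, 15, 0},
+// {kFmtUnused, -1, -1}}, name "addiu", size 4.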
+
+extern Mips64EncodingMap EncodingMap[kMips64Last];
+
+#define IS_UIMM16(v) ((0 <= (v)) && ((v) <= 65535))
+#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32766))
+#define IS_SIMM16_2WORD(v) ((-32764 <= (v)) && ((v) <= 32763))  // 2 offsets must fit.
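+// E.g. IS_SIMM16(-32768) and IS_SIMM16(32766) hold while IS_SIMM16(32767)
+// does not; the upper bound sits one below INT16_MAX.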
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
diff --git a/compiler/dex/quick/mips64/target_mips64.cc b/compiler/dex/quick/mips64/target_mips64.cc
new file mode 100644
index 0000000..6ed9617
--- /dev/null
+++ b/compiler/dex/quick/mips64/target_mips64.cc
@@ -0,0 +1,653 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips64.h"
+
+#include <inttypes.h>
+
+#include <string>
+
+#include "arch/mips64/instruction_set_features_mips64.h"
+#include "backend_mips64.h"
+#include "base/logging.h"
+#include "dex/compiler_ir.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "driver/compiler_driver.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+static constexpr RegStorage core_regs_arr32[] =
+    {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6,
+     rs_rA7, rs_rT0, rs_rT1, rs_rT2, rs_rT3, rs_rS0, rs_rS1, rs_rS2, rs_rS3, rs_rS4, rs_rS5,
+     rs_rS6, rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP, rs_rRA};
+static constexpr RegStorage core_regs_arr64[] =
+    {rs_rZEROd, rs_rATd, rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d,
+     rs_rA6d, rs_rA7d, rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rS0d, rs_rS1d, rs_rS2d, rs_rS3d,
+     rs_rS4d, rs_rS5d, rs_rS6d, rs_rS7d, rs_rT8d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd,
+     rs_rFPd, rs_rRAd};
+#if 0
+// TODO: f24-f31 must be saved before calls and restored after.
+static constexpr RegStorage sp_regs_arr[] =
+    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+     rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
+     rs_rF31};
+static constexpr RegStorage dp_regs_arr[] =
+    {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+     rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+     rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
+     rs_rD31};
+#else
+static constexpr RegStorage sp_regs_arr[] =
+    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+     rs_rF21, rs_rF22, rs_rF23};
+static constexpr RegStorage dp_regs_arr[] =
+    {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+     rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+     rs_rD21, rs_rD22, rs_rD23};
+#endif
+static constexpr RegStorage reserved_regs_arr32[] =
+    {rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
+static constexpr RegStorage reserved_regs_arr64[] =
+    {rs_rZEROd, rs_rATd, rs_rS0d, rs_rS1d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd, rs_rRAd};
+static constexpr RegStorage core_temps_arr32[] =
+    {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6, rs_rA7, rs_rT0,
+     rs_rT1, rs_rT2, rs_rT3, rs_rT8};
+static constexpr RegStorage core_temps_arr64[] =
+    {rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d, rs_rA6d, rs_rA7d,
+     rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rT8d};
+#if 0
+// TODO: f24-f31 must be saved before calls and restored after.
+static constexpr RegStorage sp_temps_arr[] =
+    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+     rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
+     rs_rF31};
+static constexpr RegStorage dp_temps_arr[] =
+    {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+     rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+     rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
+     rs_rD31};
+#else
+static constexpr RegStorage sp_temps_arr[] =
+    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+     rs_rF21, rs_rF22, rs_rF23};
+static constexpr RegStorage dp_temps_arr[] =
+    {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+     rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+     rs_rD21, rs_rD22, rs_rD23};
+#endif
+
+static constexpr ArrayRef<const RegStorage> empty_pool;
+static constexpr ArrayRef<const RegStorage> core_regs32(core_regs_arr32);
+static constexpr ArrayRef<const RegStorage> core_regs64(core_regs_arr64);
+static constexpr ArrayRef<const RegStorage> sp_regs(sp_regs_arr);
+static constexpr ArrayRef<const RegStorage> dp_regs(dp_regs_arr);
+static constexpr ArrayRef<const RegStorage> reserved_regs32(reserved_regs_arr32);
+static constexpr ArrayRef<const RegStorage> reserved_regs64(reserved_regs_arr64);
+static constexpr ArrayRef<const RegStorage> core_temps32(core_temps_arr32);
+static constexpr ArrayRef<const RegStorage> core_temps64(core_temps_arr64);
+static constexpr ArrayRef<const RegStorage> sp_temps(sp_temps_arr);
+static constexpr ArrayRef<const RegStorage> dp_temps(dp_temps_arr);
+
+RegLocation Mips64Mir2Lir::LocCReturn() {
+  return mips64_loc_c_return;
+}
+
+RegLocation Mips64Mir2Lir::LocCReturnRef() {
+  return mips64_loc_c_return_ref;
+}
+
+RegLocation Mips64Mir2Lir::LocCReturnWide() {
+  return mips64_loc_c_return_wide;
+}
+
+RegLocation Mips64Mir2Lir::LocCReturnFloat() {
+  return mips64_loc_c_return_float;
+}
+
+RegLocation Mips64Mir2Lir::LocCReturnDouble() {
+  return mips64_loc_c_return_double;
+}
+
+// Return a target-dependent special register.
+RegStorage Mips64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+  RegStorage res_reg;
+  switch (reg) {
+    case kSelf: res_reg = rs_rS1; break;
+    case kSuspend: res_reg =  rs_rS0; break;
+    case kLr: res_reg =  rs_rRA; break;
+    case kPc: res_reg = RegStorage::InvalidReg(); break;
+    case kSp: res_reg =  rs_rSP; break;
+    case kArg0: res_reg = rs_rA0; break;
+    case kArg1: res_reg = rs_rA1; break;
+    case kArg2: res_reg = rs_rA2; break;
+    case kArg3: res_reg = rs_rA3; break;
+    case kArg4: res_reg = rs_rA4; break;
+    case kArg5: res_reg = rs_rA5; break;
+    case kArg6: res_reg = rs_rA6; break;
+    case kArg7: res_reg = rs_rA7; break;
+    case kFArg0: res_reg = rs_rF12; break;
+    case kFArg1: res_reg = rs_rF13; break;
+    case kFArg2: res_reg = rs_rF14; break;
+    case kFArg3: res_reg = rs_rF15; break;
+    case kFArg4: res_reg = rs_rF16; break;
+    case kFArg5: res_reg = rs_rF17; break;
+    case kFArg6: res_reg = rs_rF18; break;
+    case kFArg7: res_reg = rs_rF19; break;
+    case kRet0: res_reg = rs_rV0; break;
+    case kRet1: res_reg = rs_rV1; break;
+    case kInvokeTgt: res_reg = rs_rT9; break;
+    case kHiddenArg: res_reg = rs_rT0; break;
+    case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
+    case kCount: res_reg = RegStorage::InvalidReg(); break;
+    default: res_reg = RegStorage::InvalidReg();
+  }
+  return res_reg;
+}
+
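+// E.g. a (hypothetical) method with shorty "VJFD" maps its long argument
+// to kArg1 (wide), its float to kFArg2 and its double to kFArg3 (wide):
+// cur_arg_reg_ indexes both tables in lockstep, so each argument consumes
+// one slot regardless of class.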
+RegStorage Mips64Mir2Lir::InToRegStorageMips64Mapper::GetNextReg(ShortyArg arg) {
+  const SpecialTargetRegister coreArgMappingToPhysicalReg[] =
+      {kArg1, kArg2, kArg3, kArg4, kArg5, kArg6, kArg7};
+  const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
+  const SpecialTargetRegister fpArgMappingToPhysicalReg[] =
+      {kFArg1, kFArg2, kFArg3, kFArg4, kFArg5, kFArg6, kFArg7};
+  const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
+
+  RegStorage result = RegStorage::InvalidReg();
+  if (arg.IsFP()) {
+    if (cur_arg_reg_ < fpArgMappingToPhysicalRegSize) {
+      DCHECK(!arg.IsRef());
+      result = m2l_->TargetReg(fpArgMappingToPhysicalReg[cur_arg_reg_++],
+                               arg.IsWide() ? kWide : kNotWide);
+    }
+  } else {
+    if (cur_arg_reg_ < coreArgMappingToPhysicalRegSize) {
+      DCHECK(!(arg.IsWide() && arg.IsRef()));
+      result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_arg_reg_++],
+                               arg.IsRef() ? kRef : (arg.IsWide() ? kWide : kNotWide));
+    }
+  }
+  return result;
+}
+
+/*
+ * Decode the register id.
+ */
+ResourceMask Mips64Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
+  return ResourceMask::Bit((reg.IsFloat() ? kMips64FPReg0 : 0) + reg.GetRegNum());
+}
+
+ResourceMask Mips64Mir2Lir::GetPCUseDefEncoding() const {
+  return ResourceMask::Bit(kMips64RegPC);
+}
+
+void Mips64Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
+                                             ResourceMask* def_mask) {
+  DCHECK(!lir->flags.use_def_invalid);
+
+  // Mips64-specific resource map setup here.
+  if (flags & REG_DEF_SP) {
+    def_mask->SetBit(kMips64RegSP);
+  }
+
+  if (flags & REG_USE_SP) {
+    use_mask->SetBit(kMips64RegSP);
+  }
+
+  if (flags & REG_DEF_LR) {
+    def_mask->SetBit(kMips64RegLR);
+  }
+}
+
+/* For dumping instructions */
+#define MIPS64_REG_COUNT 32
+static const char *mips64_reg_name[MIPS64_REG_COUNT] = {
+  "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+  "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
+  "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+  "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
+};
+
+/*
+ * Interpret a format string and build a string no longer than size.
+ * See format key in assemble_mips64.cc.
+ */
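+// E.g. a fmt of "!0r,!1r,!2r" with operands {2, 4, 5} expands to
+// "v0,a0,a1" via the 'r' case below.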
+std::string Mips64Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
+  std::string buf;
+  int i;
+  const char *fmt_end = &fmt[strlen(fmt)];
+  char tbuf[256];
+  char nc;
+  while (fmt < fmt_end) {
+    int operand;
+    if (*fmt == '!') {
+      fmt++;
+      DCHECK_LT(fmt, fmt_end);
+      nc = *fmt++;
+      if (nc == '!') {
+        strcpy(tbuf, "!");
+      } else {
+        DCHECK_LT(fmt, fmt_end);
+        DCHECK_LT(static_cast<unsigned>(nc-'0'), 4u);
+        operand = lir->operands[nc-'0'];
+        switch (*fmt++) {
+          case 'b':
+            strcpy(tbuf, "0000");
+            for (i = 3; i >= 0; i--) {
+              tbuf[i] += operand & 1;
+              operand >>= 1;
+            }
+            break;
+          case 's':
+            snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
+            break;
+          case 'S':
+            DCHECK_EQ(RegStorage::RegNum(operand) & 1, 0);
+            snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
+            break;
+          case 'h':
+            snprintf(tbuf, arraysize(tbuf), "%04x", operand);
+            break;
+          case 'M':
+          case 'd':
+            snprintf(tbuf, arraysize(tbuf), "%d", operand);
+            break;
+          case 'D':
+            snprintf(tbuf, arraysize(tbuf), "%d", operand+1);
+            break;
+          case 'E':
+            snprintf(tbuf, arraysize(tbuf), "%d", operand*4);
+            break;
+          case 'F':
+            snprintf(tbuf, arraysize(tbuf), "%d", operand*2);
+            break;
+          case 't':
+            snprintf(tbuf, arraysize(tbuf), "0x%08" PRIxPTR " (L%p)",
+                     reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 + (operand << 1),
+                     lir->target);
+            break;
+          case 'T':
+            snprintf(tbuf, arraysize(tbuf), "0x%08x", operand << 2);
+            break;
+          case 'u': {
+            int offset_1 = lir->operands[0];
+            int offset_2 = NEXT_LIR(lir)->operands[0];
+            uintptr_t target =
+                (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) & ~3) +
+                    (offset_1 << 21 >> 9) + (offset_2 << 1)) & 0xfffffffc;
+            snprintf(tbuf, arraysize(tbuf), "%p", reinterpret_cast<void*>(target));
+            break;
+          }
+
+          /* Nothing to print for the second half of an instruction pair. */
+          case 'v':
+            strcpy(tbuf, "see above");
+            break;
+          case 'r':
+            DCHECK(operand >= 0 && operand < MIPS64_REG_COUNT);
+            strcpy(tbuf, mips64_reg_name[operand]);
+            break;
+          case 'N':
+            // Placeholder for delay slot handling
+            strcpy(tbuf, ";  nop");
+            break;
+          default:
+            strcpy(tbuf, "DecodeError");
+            break;
+        }
+        buf += tbuf;
+      }
+    } else {
+      buf += *fmt++;
+    }
+  }
+  return buf;
+}
+
+// FIXME: need to redo resource maps for MIPS64 - fix this at that time.
+void Mips64Mir2Lir::DumpResourceMask(LIR *mips64_lir, const ResourceMask& mask,
+                                     const char *prefix) {
+  char buf[256];
+  buf[0] = 0;
+
+  if (mask.Equals(kEncodeAll)) {
+    strcpy(buf, "all");
+  } else {
+    char num[8];
+    int i;
+
+    for (i = 0; i < kMips64RegEnd; i++) {
+      if (mask.HasBit(i)) {
+        snprintf(num, arraysize(num), "%d ", i);
+        strcat(buf, num);
+      }
+    }
+
+    if (mask.HasBit(ResourceMask::kCCode)) {
+      strcat(buf, "cc ");
+    }
+    if (mask.HasBit(ResourceMask::kFPStatus)) {
+      strcat(buf, "fpcc ");
+    }
+    // Memory bits.
+    if (mips64_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
+      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
+               DECODE_ALIAS_INFO_REG(mips64_lir->flags.alias_info),
+               DECODE_ALIAS_INFO_WIDE(mips64_lir->flags.alias_info) ? "(+1)" : "");
+    }
+    if (mask.HasBit(ResourceMask::kLiteral)) {
+      strcat(buf, "lit ");
+    }
+
+    if (mask.HasBit(ResourceMask::kHeapRef)) {
+      strcat(buf, "heap ");
+    }
+    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
+      strcat(buf, "noalias ");
+    }
+  }
+  if (buf[0]) {
+    LOG(INFO) << prefix << ": " <<  buf;
+  }
+}
+
+/*
+ * TUNING: is true leaf?  Can't just use METHOD_IS_LEAF to determine as some
+ * instructions might call out to C/assembly helper functions.  Until
+ * machinery is in place, always spill ra.
+ */
+
+void Mips64Mir2Lir::AdjustSpillMask() {
+  core_spill_mask_ |= (1 << rs_rRA.GetRegNum());
+  num_core_spills_++;
+}
+
+/* Clobber all regs that might be used by an external C call */
+void Mips64Mir2Lir::ClobberCallerSave() {
+  Clobber(rs_rZEROd);
+  Clobber(rs_rATd);
+  Clobber(rs_rV0d);
+  Clobber(rs_rV1d);
+  Clobber(rs_rA0d);
+  Clobber(rs_rA1d);
+  Clobber(rs_rA2d);
+  Clobber(rs_rA3d);
+  Clobber(rs_rA4d);
+  Clobber(rs_rA5d);
+  Clobber(rs_rA6d);
+  Clobber(rs_rA7d);
+  Clobber(rs_rT0d);
+  Clobber(rs_rT1d);
+  Clobber(rs_rT2d);
+  Clobber(rs_rT3d);
+  Clobber(rs_rT8d);
+  Clobber(rs_rT9d);
+  Clobber(rs_rK0d);
+  Clobber(rs_rK1d);
+  Clobber(rs_rGPd);
+  Clobber(rs_rFPd);
+  Clobber(rs_rRAd);
+
+  Clobber(rs_rF0);
+  Clobber(rs_rF1);
+  Clobber(rs_rF2);
+  Clobber(rs_rF3);
+  Clobber(rs_rF4);
+  Clobber(rs_rF5);
+  Clobber(rs_rF6);
+  Clobber(rs_rF7);
+  Clobber(rs_rF8);
+  Clobber(rs_rF9);
+  Clobber(rs_rF10);
+  Clobber(rs_rF11);
+  Clobber(rs_rF12);
+  Clobber(rs_rF13);
+  Clobber(rs_rF14);
+  Clobber(rs_rF15);
+  Clobber(rs_rD0);
+  Clobber(rs_rD1);
+  Clobber(rs_rD2);
+  Clobber(rs_rD3);
+  Clobber(rs_rD4);
+  Clobber(rs_rD5);
+  Clobber(rs_rD6);
+  Clobber(rs_rD7);
+}
+
+RegLocation Mips64Mir2Lir::GetReturnWideAlt() {
+  UNIMPLEMENTED(FATAL) << "No GetReturnWideAlt for MIPS64";
+  RegLocation res = LocCReturnWide();
+  return res;
+}
+
+RegLocation Mips64Mir2Lir::GetReturnAlt() {
+  UNIMPLEMENTED(FATAL) << "No GetReturnAlt for MIPS64";
+  RegLocation res = LocCReturn();
+  return res;
+}
+
+/* To be used when explicitly managing register use */
+void Mips64Mir2Lir::LockCallTemps() {
+  LockTemp(rs_rMIPS64_ARG0);
+  LockTemp(rs_rMIPS64_ARG1);
+  LockTemp(rs_rMIPS64_ARG2);
+  LockTemp(rs_rMIPS64_ARG3);
+  LockTemp(rs_rMIPS64_ARG4);
+  LockTemp(rs_rMIPS64_ARG5);
+  LockTemp(rs_rMIPS64_ARG6);
+  LockTemp(rs_rMIPS64_ARG7);
+}
+
+/* To be used when explicitly managing register use */
+void Mips64Mir2Lir::FreeCallTemps() {
+  FreeTemp(rs_rMIPS64_ARG0);
+  FreeTemp(rs_rMIPS64_ARG1);
+  FreeTemp(rs_rMIPS64_ARG2);
+  FreeTemp(rs_rMIPS64_ARG3);
+  FreeTemp(rs_rMIPS64_ARG4);
+  FreeTemp(rs_rMIPS64_ARG5);
+  FreeTemp(rs_rMIPS64_ARG6);
+  FreeTemp(rs_rMIPS64_ARG7);
+  FreeTemp(TargetReg(kHiddenArg));
+}
+
+bool Mips64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind ATTRIBUTE_UNUSED) {
+  if (cu_->compiler_driver->GetInstructionSetFeatures()->IsSmp()) {
+    NewLIR1(kMips64Sync, 0 /* Only stype currently supported */);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+void Mips64Mir2Lir::CompilerInitializeRegAlloc() {
+  reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs32, core_regs64, sp_regs,
+                                            dp_regs, reserved_regs32, reserved_regs64,
+                                            core_temps32, core_temps64, sp_temps, dp_temps));
+
+  // Target-specific adjustments.
+
+  // Alias single precision floats to appropriate half of overlapping double.
+  for (RegisterInfo* info : reg_pool_->sp_regs_) {
+    int sp_reg_num = info->GetReg().GetRegNum();
+    int dp_reg_num = sp_reg_num;
+    RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
+    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+    // Double precision register's master storage should refer to itself.
+    DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+    // Redirect single precision's master storage to master.
+    info->SetMaster(dp_reg_info);
+    // Singles should show a single 32-bit mask bit, at first referring to the low half.
+    DCHECK_EQ(info->StorageMask(), 0x1U);
+  }
+
+  // Alias 32-bit core registers to their corresponding 64-bit registers.
+  for (RegisterInfo* info : reg_pool_->core_regs_) {
+    int d_reg_num = info->GetReg().GetRegNum();
+    RegStorage d_reg = RegStorage::Solo64(d_reg_num);
+    RegisterInfo* d_reg_info = GetRegInfo(d_reg);
+    // The 64-bit register's master storage should refer to itself.
+    DCHECK_EQ(d_reg_info, d_reg_info->Master());
+    // Redirect the 32-bit register's master storage to its 64-bit counterpart.
+    info->SetMaster(d_reg_info);
+    // 32bit should show a single 32-bit mask bit, at first referring to the low half.
+    DCHECK_EQ(info->StorageMask(), 0x1U);
+  }
+
+  // Don't start allocating temps at v0/f0/d0 or you may clobber return regs in early-exit methods.
+  // TODO: adjust when we roll to hard float calling convention.
+  reg_pool_->next_core_reg_ = 2;
+  reg_pool_->next_sp_reg_ = 2;
+  reg_pool_->next_dp_reg_ = 1;
+}
+
+/*
+ * In the ARM code it is typical to use the link register
+ * to hold the target address.  However, for Mips64 we must
+ * ensure that all branch instructions can be restarted if
+ * there is a trap in the shadow.  Allocate a temp register.
+ */
+RegStorage Mips64Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
+  // NOTE: native pointer.
+  LoadWordDisp(rs_rMIPS64_SELF, GetThreadOffset<8>(trampoline).Int32Value(), rs_rT9d);
+  return rs_rT9d;
+}
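
(A hedged sketch of the call pattern this sets up; the types and field below are illustrative stand-ins, not the runtime's real layout. The entrypoint address is read out of the Thread block into t9 and the call goes through the register, so the branch can be restarted after a trap in the delay-slot shadow.)

    // Illustrative only: what LoadHelper() + OpReg(kOpBlx, ...) amount to.
    struct ThreadModel {
      void (*entrypoint)();                   // In ART this sits at a fixed Thread offset.
    };
    void CallHelper(ThreadModel* self) {
      void (*target)() = self->entrypoint;    // "ld t9, offset(s1)"
      target();                               // "jalr ra, t9"
    }
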
+
+LIR* Mips64Mir2Lir::CheckSuspendUsingLoad() {
+  RegStorage tmp = AllocTemp();
+  // NOTE: native pointer.
+  LoadWordDisp(rs_rMIPS64_SELF, Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
+  LIR *inst = LoadWordDisp(tmp, 0, tmp);
+  FreeTemp(tmp);
+  return inst;
+}
+
+LIR* Mips64Mir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest) {
+  DCHECK(!r_dest.IsFloat());  // See RegClassForFieldLoadStore().
+  ClobberCallerSave();
+  LockCallTemps();  // Using fixed registers.
+  RegStorage reg_ptr = TargetReg(kArg0);
+  OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
+  RegStorage r_tgt = LoadHelper(kQuickA64Load);
+  LIR *ret = OpReg(kOpBlx, r_tgt);
+  OpRegCopy(r_dest, TargetReg(kRet0));
+  return ret;
+}
+
+LIR* Mips64Mir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) {
+  DCHECK(!r_src.IsFloat());  // See RegClassForFieldLoadStore().
+  DCHECK(!r_src.IsPair());
+  ClobberCallerSave();
+  LockCallTemps();  // Using fixed registers.
+  RegStorage temp_ptr = AllocTemp();
+  OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
+  RegStorage temp_value = AllocTemp();
+  OpRegCopy(temp_value, r_src);
+  OpRegCopy(TargetReg(kArg0), temp_ptr);
+  OpRegCopy(TargetReg(kArg1), temp_value);
+  FreeTemp(temp_ptr);
+  FreeTemp(temp_value);
+  RegStorage r_tgt = LoadHelper(kQuickA64Store);
+  return OpReg(kOpBlx, r_tgt);
+}
+
+void Mips64Mir2Lir::SpillCoreRegs() {
+  if (num_core_spills_ == 0) {
+    return;
+  }
+  uint32_t mask = core_spill_mask_;
+  // Start saving from offset 0 so that ra ends up on the top of the frame.
+  int offset = 0;
+  OpRegImm(kOpSub, rs_rSPd, num_core_spills_ * 8);
+  for (int reg = 0; mask; mask >>= 1, reg++) {
+    if (mask & 0x1) {
+      StoreWordDisp(rs_rMIPS64_SP, offset, RegStorage::Solo64(reg));
+      offset += 8;
+    }
+  }
+}
+
+void Mips64Mir2Lir::UnSpillCoreRegs() {
+  if (num_core_spills_ == 0) {
+    return;
+  }
+  uint32_t mask = core_spill_mask_;
+  int offset = frame_size_ - num_core_spills_ * 8;
+  for (int reg = 0; mask; mask >>= 1, reg++) {
+    if (mask & 0x1) {
+      LoadWordDisp(rs_rMIPS64_SP, offset, RegStorage::Solo64(reg));
+      offset += 8;
+    }
+  }
+  OpRegImm(kOpAdd, rs_rSPd, frame_size_);
+}
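
(A worked example of the spill layout, under the assumption that core_spill_mask_ uses the usual MIPS numbering where s0 = r16, s1 = r17, and ra = r31; this standalone sketch just replays the low-bit-first walk of the two loops above.)

    #include <cstdint>
    #include <cstdio>
    int main() {
      uint32_t mask = (1u << 16) | (1u << 17) | (1u << 31);  // s0, s1, ra.
      int offset = 0;
      for (int reg = 0; mask != 0; mask >>= 1, reg++) {
        if (mask & 1u) {
          std::printf("r%d -> sp+%d\n", reg, offset);  // r16->0, r17->8, r31->16.
          offset += 8;
        }
      }
      // ra carries the highest bit, so it lands at the top of the save area,
      // matching the comment in SpillCoreRegs().
    }
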
+
+bool Mips64Mir2Lir::IsUnconditionalBranch(LIR* lir) {
+  return (lir->opcode == kMips64B);
+}
+
+RegisterClass Mips64Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
+  if (UNLIKELY(is_volatile)) {
+    // On Mips64, atomic 64-bit load/store requires a core register.
+    // Smaller aligned load/store is atomic for both core and fp registers.
+    if (size == k64 || size == kDouble) {
+      return kCoreReg;
+    }
+  }
+  // TODO: Verify that both core and fp registers are suitable for smaller sizes.
+  return RegClassBySize(size);
+}
+
+Mips64Mir2Lir::Mips64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+    : Mir2Lir(cu, mir_graph, arena), in_to_reg_storage_mips64_mapper_(this) {
+  for (int i = 0; i < kMips64Last; i++) {
+    DCHECK_EQ(Mips64Mir2Lir::EncodingMap[i].opcode, i)
+        << "Encoding order for " << Mips64Mir2Lir::EncodingMap[i].name
+        << " is wrong: expecting " << i << ", seeing "
+        << static_cast<int>(Mips64Mir2Lir::EncodingMap[i].opcode);
+  }
+}
+
+Mir2Lir* Mips64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                             ArenaAllocator* const arena) {
+  return new Mips64Mir2Lir(cu, mir_graph, arena);
+}
+
+uint64_t Mips64Mir2Lir::GetTargetInstFlags(int opcode) {
+  DCHECK(!IsPseudoLirOp(opcode));
+  return Mips64Mir2Lir::EncodingMap[opcode].flags;
+}
+
+const char* Mips64Mir2Lir::GetTargetInstName(int opcode) {
+  DCHECK(!IsPseudoLirOp(opcode));
+  return Mips64Mir2Lir::EncodingMap[opcode].name;
+}
+
+const char* Mips64Mir2Lir::GetTargetInstFmt(int opcode) {
+  DCHECK(!IsPseudoLirOp(opcode));
+  return Mips64Mir2Lir::EncodingMap[opcode].fmt;
+}
+
+void Mips64Mir2Lir::GenBreakpoint(int code) {
+  NewLIR1(kMips64Break, code);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/mips64/utility_mips64.cc b/compiler/dex/quick/mips64/utility_mips64.cc
new file mode 100644
index 0000000..38e354c
--- /dev/null
+++ b/compiler/dex/quick/mips64/utility_mips64.cc
@@ -0,0 +1,875 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips64.h"
+
+#include "arch/mips64/instruction_set_features_mips64.h"
+#include "base/logging.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "dex/reg_storage_eq.h"
+#include "driver/compiler_driver.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+/* This file contains codegen for the MIPS64 ISA. */
+
+LIR* Mips64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
+  int opcode;
+  // Must be both DOUBLE or both not DOUBLE.
+  DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
+  if (r_dest.Is64Bit()) {
+    if (r_dest.IsDouble()) {
+      if (r_src.IsDouble()) {
+        opcode = kMips64Fmovd;
+      } else {
+        // Note the operands are swapped for the dmtc1 instr.
+        RegStorage t_opnd = r_src;
+        r_src = r_dest;
+        r_dest = t_opnd;
+        opcode = kMips64Dmtc1;
+      }
+    } else {
+      DCHECK(r_src.IsDouble());
+      opcode = kMips64Dmfc1;
+    }
+  } else {
+    if (r_dest.IsSingle()) {
+      if (r_src.IsSingle()) {
+        opcode = kMips64Fmovs;
+      } else {
+        // Note the operands are swapped for the mtc1 instr.
+        RegStorage t_opnd = r_src;
+        r_src = r_dest;
+        r_dest = t_opnd;
+        opcode = kMips64Mtc1;
+      }
+    } else {
+      DCHECK(r_src.IsSingle());
+      opcode = kMips64Mfc1;
+    }
+  }
+  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
+  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+    res->flags.is_nop = true;
+  }
+  return res;
+}
+
+bool Mips64Mir2Lir::InexpensiveConstantInt(int32_t value) {
+  // For encodings, see LoadConstantNoClobber below.
+  return ((value == 0) || IsUint<16>(value) || IsInt<16>(value));
+}
+
+bool Mips64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
+  UNUSED(value);
+  return false;  // TUNING
+}
+
+bool Mips64Mir2Lir::InexpensiveConstantLong(int64_t value) {
+  UNUSED(value);
+  return false;  // TUNING
+}
+
+bool Mips64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
+  UNUSED(value);
+  return false;  // TUNING
+}
+
+/*
+ * Load an immediate using a shortcut if possible; otherwise
+ * grab from the per-translation literal pool.  If target is
+ * a high register, build constant into a low register and copy.
+ *
+ * No additional register clobbering operation is performed. Use this version when
+ * 1) r_dest is freshly returned from AllocTemp, or
+ * 2) the codegen is under fixed register usage.
+ */
+LIR* Mips64Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
+  LIR *res;
+
+  RegStorage r_dest_save = r_dest;
+  bool is_fp_reg = r_dest.IsFloat();
+  if (is_fp_reg) {
+    DCHECK(r_dest.IsSingle());
+    r_dest = AllocTemp();
+  }
+
+  // See if the value can be constructed cheaply.
+  if (value == 0) {
+    res = NewLIR2(kMips64Move, r_dest.GetReg(), rZERO);
+  } else if (IsUint<16>(value)) {
+    // Use OR with (unsigned) immediate to encode 16b unsigned int.
+    res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZERO, value);
+  } else if (IsInt<16>(value)) {
+    // Use ADD with (signed) immediate to encode 16b signed int.
+    res = NewLIR3(kMips64Addiu, r_dest.GetReg(), rZERO, value);
+  } else {
+    res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+    if (value & 0xffff) {
+      NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
+    }
+  }
+
+  if (is_fp_reg) {
+    NewLIR2(kMips64Mtc1, r_dest.GetReg(), r_dest_save.GetReg());
+    FreeTemp(r_dest);
+  }
+
+  return res;
+}
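
(To make the branch order above concrete, here is a small standalone classifier; the strings are just labels for the sequence each branch would emit. It mirrors the code for illustration and is not part of the patch.)

    #include <cstdint>
    #include <cstdio>
    const char* SequenceFor(int32_t v) {
      if (v == 0) return "move rd, zero";
      if (v >= 0 && v <= 0xFFFF) return "ori rd, zero, imm16";          // IsUint<16>.
      if (v >= -0x8000 && v <= 0x7FFF) return "addiu rd, zero, imm16";  // IsInt<16>.
      return (v & 0xFFFF) != 0 ? "lui rd, hi16; ori rd, rd, lo16" : "lui rd, hi16";
    }
    int main() {
      std::printf("%s\n", SequenceFor(0x1234));      // ori
      std::printf("%s\n", SequenceFor(-8));          // addiu
      std::printf("%s\n", SequenceFor(0x12345678));  // lui + ori
    }
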
+
+LIR* Mips64Mir2Lir::OpUnconditionalBranch(LIR* target) {
+  LIR* res = NewLIR1(kMips64B, 0 /* offset to be patched during assembly */);
+  res->target = target;
+  return res;
+}
+
+LIR* Mips64Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
+  Mips64OpCode opcode = kMips64Nop;
+  switch (op) {
+    case kOpBlx:
+      opcode = kMips64Jalr;
+      break;
+    case kOpBx:
+      return NewLIR2(kMips64Jalr, rZERO, r_dest_src.GetReg());
+    default:
+      LOG(FATAL) << "Bad case in OpReg";
+  }
+  return NewLIR2(opcode, rRAd, r_dest_src.GetReg());
+}
+
+LIR* Mips64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
+  LIR *res;
+  bool neg = (value < 0);
+  int abs_value = (neg) ? -value : value;
+  bool short_form = (abs_value & 0xff) == abs_value;
+  bool is64bit = r_dest_src1.Is64Bit();
+  RegStorage r_scratch;
+  Mips64OpCode opcode = kMips64Nop;
+  switch (op) {
+    case kOpAdd:
+      return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
+    case kOpSub:
+      return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
+    default:
+      LOG(FATAL) << "Bad case in OpRegImm";
+  }
+  if (short_form) {
+    res = NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
+  } else {
+    if (is64bit) {
+      r_scratch = AllocTempWide();
+      res = LoadConstantWide(r_scratch, value);
+    } else {
+      r_scratch = AllocTemp();
+      res = LoadConstant(r_scratch, value);
+    }
+    if (op == kOpCmp) {
+      NewLIR2(opcode, r_dest_src1.GetReg(), r_scratch.GetReg());
+    } else {
+      NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_scratch.GetReg());
+    }
+  }
+  return res;
+}
+
+LIR* Mips64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest,
+                                RegStorage r_src1, RegStorage r_src2) {
+  Mips64OpCode opcode = kMips64Nop;
+  bool is64bit = r_dest.Is64Bit() || r_src1.Is64Bit() || r_src2.Is64Bit();
+
+  switch (op) {
+    case kOpAdd:
+      if (is64bit) {
+        opcode = kMips64Daddu;
+      } else {
+        opcode = kMips64Addu;
+      }
+      break;
+    case kOpSub:
+      if (is64bit) {
+        opcode = kMips64Dsubu;
+      } else {
+        opcode = kMips64Subu;
+      }
+      break;
+    case kOpAnd:
+      opcode = kMips64And;
+      break;
+    case kOpMul:
+      opcode = kMips64Mul;
+      break;
+    case kOpOr:
+      opcode = kMips64Or;
+      break;
+    case kOpXor:
+      opcode = kMips64Xor;
+      break;
+    case kOpLsl:
+      if (is64bit) {
+        opcode = kMips64Dsllv;
+      } else {
+        opcode = kMips64Sllv;
+      }
+      break;
+    case kOpLsr:
+      if (is64bit) {
+        opcode = kMips64Dsrlv;
+      } else {
+        opcode = kMips64Srlv;
+      }
+      break;
+    case kOpAsr:
+      if (is64bit) {
+        opcode = kMips64Dsrav;
+      } else {
+        opcode = kMips64Srav;
+      }
+      break;
+    case kOpAdc:
+    case kOpSbc:
+      LOG(FATAL) << "No carry bit on MIPS64";
+      break;
+    default:
+      LOG(FATAL) << "Bad case in OpRegRegReg";
+      break;
+  }
+  return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
+}
+
+LIR* Mips64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
+  LIR *res;
+  Mips64OpCode opcode = kMips64Nop;
+  bool short_form = true;
+  bool is64bit = r_dest.Is64Bit() || r_src1.Is64Bit();
+
+  switch (op) {
+    case kOpAdd:
+      if (is64bit) {
+        if (IS_SIMM16(value)) {
+          opcode = kMips64Daddiu;
+        } else {
+          short_form = false;
+          opcode = kMips64Daddu;
+        }
+      } else {
+        if (IS_SIMM16(value)) {
+          opcode = kMips64Addiu;
+        } else {
+          short_form = false;
+          opcode = kMips64Addu;
+        }
+      }
+      break;
+    case kOpSub:
+      if (is64bit) {
+        if (IS_SIMM16((-value))) {
+          value = -value;
+          opcode = kMips64Daddiu;
+        } else {
+          short_form = false;
+          opcode = kMips64Dsubu;
+        }
+      } else {
+        if (IS_SIMM16((-value))) {
+          value = -value;
+          opcode = kMips64Addiu;
+        } else {
+          short_form = false;
+          opcode = kMips64Subu;
+        }
+      }
+      break;
+    case kOpLsl:
+      if (is64bit) {
+        DCHECK(value >= 0 && value <= 63);
+        if (value >= 0 && value <= 31) {
+          opcode = kMips64Dsll;
+        } else {
+          opcode = kMips64Dsll32;
+          value = value - 32;
+        }
+      } else {
+        DCHECK(value >= 0 && value <= 31);
+        opcode = kMips64Sll;
+      }
+      break;
+    case kOpLsr:
+      if (is64bit) {
+        DCHECK(value >= 0 && value <= 63);
+        if (value >= 0 && value <= 31) {
+          opcode = kMips64Dsrl;
+        } else {
+          opcode = kMips64Dsrl32;
+          value = value - 32;
+        }
+      } else {
+        DCHECK(value >= 0 && value <= 31);
+        opcode = kMips64Srl;
+      }
+      break;
+    case kOpAsr:
+      if (is64bit) {
+        DCHECK(value >= 0 && value <= 63);
+        if (value >= 0 && value <= 31) {
+          opcode = kMips64Dsra;
+        } else {
+          opcode = kMips64Dsra32;
+          value = value - 32;
+        }
+      } else {
+        DCHECK(value >= 0 && value <= 31);
+        opcode = kMips64Sra;
+      }
+      break;
+    case kOpAnd:
+      if (IS_UIMM16((value))) {
+        opcode = kMips64Andi;
+      } else {
+        short_form = false;
+        opcode = kMips64And;
+      }
+      break;
+    case kOpOr:
+      if (IS_UIMM16((value))) {
+        opcode = kMips64Ori;
+      } else {
+        short_form = false;
+        opcode = kMips64Or;
+      }
+      break;
+    case kOpXor:
+      if (IS_UIMM16((value))) {
+        opcode = kMips64Xori;
+      } else {
+        short_form = false;
+        opcode = kMips64Xor;
+      }
+      break;
+    case kOpMul:
+      short_form = false;
+      opcode = kMips64Mul;
+      break;
+    default:
+      LOG(FATAL) << "Bad case in OpRegRegImm";
+      break;
+  }
+
+  if (short_form) {
+    res = NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), value);
+  } else {
+    if (r_dest != r_src1) {
+      res = LoadConstant(r_dest, value);
+      NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_dest.GetReg());
+    } else {
+      if (is64bit) {
+        RegStorage r_scratch = AllocTempWide();
+        res = LoadConstantWide(r_scratch, value);
+        NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
+      } else {
+        RegStorage r_scratch = AllocTemp();
+        res = LoadConstant(r_scratch, value);
+        NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
+      }
+    }
+  }
+  return res;
+}
+
+LIR* Mips64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
+  Mips64OpCode opcode = kMips64Nop;
+  LIR *res;
+  switch (op) {
+    case kOpMov:
+      opcode = kMips64Move;
+      break;
+    case kOpMvn:
+      return NewLIR3(kMips64Nor, r_dest_src1.GetReg(), r_src2.GetReg(), rZEROd);
+    case kOpNeg:
+      if (r_dest_src1.Is64Bit()) {
+        return NewLIR3(kMips64Dsubu, r_dest_src1.GetReg(), rZEROd, r_src2.GetReg());
+      } else {
+        return NewLIR3(kMips64Subu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
+      }
+    case kOpAdd:
+    case kOpAnd:
+    case kOpMul:
+    case kOpOr:
+    case kOpSub:
+    case kOpXor:
+      return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
+    case kOp2Byte:
+      res = NewLIR2(kMips64Seb, r_dest_src1.GetReg(), r_src2.GetReg());
+      return res;
+    case kOp2Short:
+      res = NewLIR2(kMips64Seh, r_dest_src1.GetReg(), r_src2.GetReg());
+      return res;
+    case kOp2Char:
+      return NewLIR3(kMips64Andi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
+    default:
+      LOG(FATAL) << "Bad case in OpRegReg";
+      UNREACHABLE();
+  }
+  return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
+}
+
+LIR* Mips64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
+                                MoveType move_type) {
+  UNUSED(r_dest, r_base, offset, move_type);
+  UNIMPLEMENTED(FATAL);
+  UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset,
+                                RegStorage r_src, MoveType move_type) {
+  UNUSED(r_base, offset, r_src, move_type);
+  UNIMPLEMENTED(FATAL);
+  UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc,
+                                 RegStorage r_dest, RegStorage r_src) {
+  UNUSED(op, cc, r_dest, r_src);
+  LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS64";
+  UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
+  LIR *res = nullptr;
+  DCHECK(r_dest.Is64Bit());
+  RegStorage r_dest_save = r_dest;
+  bool is_fp_reg = r_dest.IsFloat();
+  if (is_fp_reg) {
+    DCHECK(r_dest.IsDouble());
+    r_dest = AllocTemp();
+  }
+
+  int bit31 = (value & UINT64_C(0x80000000)) != 0;
+
+  // Loads with 1 instruction.
+  if (IsUint<16>(value)) {
+    res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
+  } else if (IsInt<16>(value)) {
+    res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, value);
+  } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
+    res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+  } else if (IsInt<32>(value)) {
+    // Loads with 2 instructions.
+    res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+    NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
+  } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
+    res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
+    NewLIR2(kMips64Dahi, r_dest.GetReg(), value >> 32);
+  } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
+    res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
+    NewLIR2(kMips64Dati, r_dest.GetReg(), value >> 48);
+  } else if ((value & 0xFFFF) == 0 && (value >> 32) >= (-32768 - bit31) &&
+             (value >> 32) <= (32767 - bit31)) {
+    res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+    NewLIR2(kMips64Dahi, r_dest.GetReg(), (value >> 32) + bit31);
+  } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
+    res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+    NewLIR2(kMips64Dati, r_dest.GetReg(), (value >> 48) + bit31);
+  } else {
+    int64_t tmp = value;
+    int shift_cnt = 0;
+    while ((tmp & 1) == 0) {
+      tmp >>= 1;
+      shift_cnt++;
+    }
+
+    if (IsUint<16>(tmp)) {
+      res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp);
+      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+              shift_cnt & 0x1F);
+    } else if (IsInt<16>(tmp)) {
+      res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
+      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+              shift_cnt & 0x1F);
+    } else if (IsInt<32>(tmp)) {
+      // Loads with 3 instructions.
+      res = NewLIR2(kMips64Lui, r_dest.GetReg(), tmp >> 16);
+      NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), tmp);
+      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+              shift_cnt & 0x1F);
+    } else {
+      tmp = value >> 16;
+      shift_cnt = 16;
+      while ((tmp & 1) == 0) {
+        tmp >>= 1;
+        shift_cnt++;
+      }
+
+      if (IsUint<16>(tmp)) {
+        res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp);
+        NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+                shift_cnt & 0x1F);
+        NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
+      } else if (IsInt<16>(tmp)) {
+        res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
+        NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+                shift_cnt & 0x1F);
+        NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
+      } else {
+        // Loads with 3-4 instructions.
+        uint64_t tmp2 = value;
+        if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
+          res = NewLIR2(kMips64Lui, r_dest.GetReg(), tmp2 >> 16);
+        }
+        if ((tmp2 & 0xFFFF) != 0) {
+          if (res != nullptr) {
+            NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), tmp2);
+          } else {
+            res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp2);
+          }
+        }
+        if (bit31) {
+          tmp2 += UINT64_C(0x100000000);
+        }
+        if (((tmp2 >> 32) & 0xFFFF) != 0) {
+          NewLIR2(kMips64Dahi, r_dest.GetReg(), tmp2 >> 32);
+        }
+        if (tmp2 & UINT64_C(0x800000000000)) {
+          tmp2 += UINT64_C(0x1000000000000);
+        }
+        if ((tmp2 >> 48) != 0) {
+          NewLIR2(kMips64Dati, r_dest.GetReg(), tmp2 >> 48);
+        }
+      }
+    }
+  }
+
+  if (is_fp_reg) {
+    NewLIR2(kMips64Dmtc1, r_dest.GetReg(), r_dest_save.GetReg());
+    FreeTemp(r_dest);
+  }
+
+  return res;
+}
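
(Worked examples of the case analysis above, for orientation; dahi and dati add a signed 16-bit immediate into bits 32-47 and 48-63 respectively, and the final fallback factors out trailing zeros so a cheap constant can be shifted into place.)

    0x0000000000001234  ->  ori rd, zero, 0x1234                        (1 instruction)
    0xFFFFFFFFFFFFFFF8  ->  daddiu rd, zero, -8                         (1 instruction)
    0x0000000012345678  ->  lui rd, 0x1234; ori rd, rd, 0x5678          (2 instructions)
    0x0000123400000000  ->  ori rd, zero, 0; dahi rd, 0x1234            (2 instructions)
    0x5555555500000000  ->  lui rd, 0x5555; ori rd, rd, 0x5555;
                            dsll32 rd, rd, 0                            (3 instructions)
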
+
+/* Load value from base + scaled index. */
+LIR* Mips64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
+                                    int scale, OpSize size) {
+  LIR *first = nullptr;
+  LIR *res;
+  RegStorage t_reg;
+  Mips64OpCode opcode = kMips64Nop;
+  bool is64bit = r_dest.Is64Bit();
+  if (is64bit) {
+    t_reg = AllocTempWide();
+  } else {
+    t_reg = AllocTemp();
+  }
+
+  if (r_dest.IsFloat()) {
+    DCHECK(r_dest.IsSingle());
+    DCHECK((size == k32) || (size == kSingle) || (size == kReference));
+    size = kSingle;
+  } else if (is64bit) {
+    size = k64;
+  } else {
+    if (size == kSingle) {
+      size = k32;
+    }
+  }
+
+  if (!scale) {
+    if (is64bit) {
+      first = NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
+    } else {
+      first = NewLIR3(kMips64Addu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
+    }
+  } else {
+    first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+    NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
+  }
+
+  switch (size) {
+    case k64:
+      opcode = kMips64Ld;
+      break;
+    case kSingle:
+      opcode = kMips64Flwc1;
+      break;
+    case k32:
+    case kReference:
+      opcode = kMips64Lw;
+      break;
+    case kUnsignedHalf:
+      opcode = kMips64Lhu;
+      break;
+    case kSignedHalf:
+      opcode = kMips64Lh;
+      break;
+    case kUnsignedByte:
+      opcode = kMips64Lbu;
+      break;
+    case kSignedByte:
+      opcode = kMips64Lb;
+      break;
+    default:
+      LOG(FATAL) << "Bad case in LoadBaseIndexed";
+  }
+
+  res = NewLIR3(opcode, r_dest.GetReg(), 0, t_reg.GetReg());
+  FreeTemp(t_reg);
+  return (first) ? first : res;
+}
+
+/* Store value at base + scaled index. */
+LIR* Mips64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
+                                     int scale, OpSize size) {
+  LIR *first = nullptr;
+  Mips64OpCode opcode = kMips64Nop;
+  RegStorage t_reg = AllocTemp();
+
+  if (r_src.IsFloat()) {
+    DCHECK(r_src.IsSingle());
+    DCHECK((size == k32) || (size == kSingle) || (size == kReference));
+    size = kSingle;
+  } else {
+    if (size == kSingle) {
+      size = k32;
+    }
+  }
+
+  if (!scale) {
+    first = NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
+  } else {
+    first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+    NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
+  }
+
+  switch (size) {
+    case kSingle:
+      opcode = kMips64Fswc1;
+      break;
+    case k32:
+    case kReference:
+      opcode = kMips64Sw;
+      break;
+    case kUnsignedHalf:
+    case kSignedHalf:
+      opcode = kMips64Sh;
+      break;
+    case kUnsignedByte:
+    case kSignedByte:
+      opcode = kMips64Sb;
+      break;
+    default:
+      LOG(FATAL) << "Bad case in StoreBaseIndexed";
+  }
+  NewLIR3(opcode, r_src.GetReg(), 0, t_reg.GetReg());
+  return first;
+}
+
+// FIXME: don't split r_dest into 2 containers.
+LIR* Mips64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
+                                     OpSize size) {
+/*
+ * Load value from base + displacement.  Optionally perform null check
+ * on base (which must have an associated s_reg and MIR).  If not
+ * performing null check, incoming MIR can be null. IMPORTANT: this
+ * code must not allocate any new temps.  If a new register is needed
+ * and base and dest are the same, spill some other register to
+ * rlp and then restore.
+ */
+  LIR *res;
+  LIR *load = nullptr;
+  Mips64OpCode opcode = kMips64Nop;
+  bool short_form = IS_SIMM16(displacement);
+
+  switch (size) {
+    case k64:
+    case kDouble:
+      r_dest = Check64BitReg(r_dest);
+      if (!r_dest.IsFloat()) {
+        opcode = kMips64Ld;
+      } else {
+        opcode = kMips64Fldc1;
+      }
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case k32:
+    case kSingle:
+    case kReference:
+      opcode = kMips64Lw;
+      if (r_dest.IsFloat()) {
+        opcode = kMips64Flwc1;
+        DCHECK(r_dest.IsSingle());
+      }
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case kUnsignedHalf:
+      opcode = kMips64Lhu;
+      DCHECK_EQ((displacement & 0x1), 0);
+      break;
+    case kSignedHalf:
+      opcode = kMips64Lh;
+      DCHECK_EQ((displacement & 0x1), 0);
+      break;
+    case kUnsignedByte:
+      opcode = kMips64Lbu;
+      break;
+    case kSignedByte:
+      opcode = kMips64Lb;
+      break;
+    default:
+      LOG(FATAL) << "Bad case in LoadBaseIndexedBody";
+  }
+
+  if (short_form) {
+    load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
+  } else {
+    RegStorage r_tmp = (r_base == r_dest) ? AllocTemp() : r_dest;
+    res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
+    load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
+    if (r_tmp != r_dest) {
+      FreeTemp(r_tmp);
+    }
+  }
+
+  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+    DCHECK_EQ(r_base, rs_rMIPS64_SP);
+    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
+  }
+  return res;
+}
+
+LIR* Mips64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+                                 OpSize size, VolatileKind is_volatile) {
+  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
+      displacement & 0x7)) {
+    // TODO: use lld/scd instructions for Mips64.
+    // Do atomic 64-bit load.
+    return GenAtomic64Load(r_base, displacement, r_dest);
+  }
+
+  // TODO: base this on target.
+  if (size == kWord) {
+    size = k64;
+  }
+  LIR* load;
+  load = LoadBaseDispBody(r_base, displacement, r_dest, size);
+
+  if (UNLIKELY(is_volatile == kVolatile)) {
+    GenMemBarrier(kLoadAny);
+  }
+
+  return load;
+}
+
+// FIXME: don't split r_src into 2 containers.
+LIR* Mips64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
+                                      OpSize size) {
+  LIR *res;
+  LIR *store = nullptr;
+  Mips64OpCode opcode = kMips64Nop;
+  bool short_form = IS_SIMM16(displacement);
+
+  switch (size) {
+    case k64:
+    case kDouble:
+      r_src = Check64BitReg(r_src);
+      if (!r_src.IsFloat()) {
+        opcode = kMips64Sd;
+      } else {
+        opcode = kMips64Fsdc1;
+      }
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case k32:
+    case kSingle:
+    case kReference:
+      opcode = kMips64Sw;
+      if (r_src.IsFloat()) {
+        opcode = kMips64Fswc1;
+        DCHECK(r_src.IsSingle());
+      }
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case kUnsignedHalf:
+    case kSignedHalf:
+      opcode = kMips64Sh;
+      DCHECK_EQ((displacement & 0x1), 0);
+      break;
+    case kUnsignedByte:
+    case kSignedByte:
+      opcode = kMips64Sb;
+      break;
+    default:
+      LOG(FATAL) << "Bad case in StoreBaseDispBody";
+  }
+
+  if (short_form) {
+    store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
+  } else {
+    RegStorage r_scratch = AllocTemp();
+    res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
+    store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
+    FreeTemp(r_scratch);
+  }
+
+  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+    DCHECK_EQ(r_base, rs_rMIPS64_SP);
+    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
+  }
+
+  return res;
+}
+
+LIR* Mips64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
+                                  OpSize size, VolatileKind is_volatile) {
+  if (is_volatile == kVolatile) {
+    // Ensure that prior accesses become visible to other threads first.
+    GenMemBarrier(kAnyStore);
+  }
+
+  LIR* store;
+  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
+      displacement & 0x7)) {
+    // TODO: use lld/scd instructions for Mips64.
+    // Do atomic 64-bit store.
+    store = GenAtomic64Store(r_base, displacement, r_src);
+  } else {
+    // TODO: base this on target.
+    if (size == kWord) {
+      size = k64;
+    }
+    store = StoreBaseDispBody(r_base, displacement, r_src, size);
+  }
+
+  if (UNLIKELY(is_volatile == kVolatile)) {
+    // Preserve order with respect to any subsequent volatile loads.
+    // We need StoreLoad, but that generally requires the most expensive barrier.
+    GenMemBarrier(kAnyAny);
+  }
+
+  return store;
+}
+
+LIR* Mips64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+  UNUSED(op, r_base, disp);
+  LOG(FATAL) << "Unexpected use of OpMem for MIPS64";
+  UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
+  UNUSED(cc, target);
+  LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS64";
+  UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+  UNUSED(trampoline);  // The address of the trampoline is already loaded into r_tgt.
+  return OpReg(op, r_tgt);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index fcf4716..13a6d9d 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -45,6 +45,7 @@
 #include "dex/quick/arm/backend_arm.h"
 #include "dex/quick/arm64/backend_arm64.h"
 #include "dex/quick/mips/backend_mips.h"
+#include "dex/quick/mips64/backend_mips64.h"
 #include "dex/quick/x86/backend_x86.h"
 
 namespace art {
@@ -87,7 +88,17 @@
     (1 << kPromoteCompilerTemps) |
     0,
     // 7 = kMips64.
-    ~0U
+    (1 << kLoadStoreElimination) |
+    (1 << kLoadHoisting) |
+    (1 << kSuppressLoads) |
+    (1 << kNullCheckElimination) |
+    (1 << kPromoteRegs) |
+    (1 << kTrackLiveTemps) |
+    (1 << kSafeOptimizations) |
+    (1 << kBBOpt) |
+    (1 << kMatch) |
+    (1 << kPromoteCompilerTemps) |
+    0
 };
 static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
               "kDisabledOpts unexpected");
@@ -119,7 +130,7 @@
     // 6 = kMips.
     nullptr,
     // 7 = kMips64.
-    ""
+    nullptr
 };
 static_assert(sizeof(kSupportedTypes) == 8 * sizeof(char*), "kSupportedTypes unexpected");
 
@@ -430,7 +441,7 @@
     // 6 = kMips.
     nullptr,
     // 7 = kMips64.
-    kAllOpcodes
+    nullptr
 };
 static_assert(sizeof(kUnsupportedOpcodes) == 8 * sizeof(int*), "kUnsupportedOpcodes unexpected");
 
@@ -451,7 +462,7 @@
     // 6 = kMips.
     0,
     // 7 = kMips64.
-    arraysize(kAllOpcodes),
+    0
 };
 static_assert(sizeof(kUnsupportedOpcodesSize) == 8 * sizeof(size_t),
               "kUnsupportedOpcodesSize unexpected");
@@ -624,12 +635,12 @@
   }
   CompilationUnit cu(driver->GetArenaPool(), instruction_set, driver, class_linker);
 
-  // TODO: Mips64 is not yet implemented.
   CHECK((cu.instruction_set == kThumb2) ||
         (cu.instruction_set == kArm64) ||
         (cu.instruction_set == kX86) ||
         (cu.instruction_set == kX86_64) ||
-        (cu.instruction_set == kMips));
+        (cu.instruction_set == kMips) ||
+        (cu.instruction_set == kMips64));
 
   // TODO: set this from command line
   constexpr bool compiler_flip_match = false;
@@ -798,6 +809,9 @@
     case kMips:
       mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
       break;
+    case kMips64:
+      mir_to_lir = Mips64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+      break;
     case kX86:
       // Fall-through.
     case kX86_64:
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 67fb804..682fa28 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -1355,7 +1355,7 @@
     default: res = LocCReturn(); break;
   }
   Clobber(res.reg);
-  if (cu_->instruction_set == kMips) {
+  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
     MarkInUse(res.reg);
   } else {
     LockTemp(res.reg);
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 8f97d1e..dbe4848 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -2202,6 +2202,7 @@
       StoreFinalValue(rl_dest, rl_result);
     } else {
       // Do the addition directly to memory.
+      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
       OpMemReg(kOpAdd, rl_result, temp.GetReg());
     }
   }
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 98abc18..d1291fa 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -358,6 +358,7 @@
       image_(image),
       image_classes_(image_classes),
       classes_to_compile_(compiled_classes),
+      had_hard_verifier_failure_(false),
       thread_count_(thread_count),
       stats_(new AOTCompilationStats),
       dedupe_enabled_(true),
@@ -616,6 +617,11 @@
   Verify(class_loader, dex_files, thread_pool, timings);
   VLOG(compiler) << "Verify: " << GetMemoryUsageString(false);
 
+  if (had_hard_verifier_failure_ && GetCompilerOptions().AbortOnHardVerifierFailure()) {
+    LOG(FATAL) << "Had a hard failure verifying all classes, and was asked to abort in such "
+               << "situations. Please check the log.";
+  }
+
   InitializeClasses(class_loader, dex_files, thread_pool, timings);
   VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString(false);
 
@@ -871,6 +877,11 @@
     const char* name = klass->GetDescriptor(&temp);
     if (data->image_class_descriptors_->find(name) != data->image_class_descriptors_->end()) {
       data->image_classes_.push_back(klass);
+    } else {
+      // Check whether it is initialized and has a clinit. Such classes must be kept, too.
+      if (klass->IsInitialized() && klass->FindClassInitializer() != nullptr) {
+        data->image_classes_.push_back(klass);
+      }
     }
 
     return true;
@@ -1834,6 +1845,7 @@
                                                   verifier::MethodVerifier::kHardFailure) {
       LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
                  << " because: " << error_msg;
+      manager->GetCompiler()->SetHadHardVerifierFailure();
     }
   } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
     CHECK(klass->IsResolved()) << PrettyClass(klass.Get());
@@ -1843,6 +1855,7 @@
       // ClassLinker::VerifyClass throws, which isn't useful in the compiler.
       CHECK(soa.Self()->IsExceptionPending());
       soa.Self()->ClearException();
+      manager->GetCompiler()->SetHadHardVerifierFailure();
     }
 
     CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
@@ -2171,8 +2184,10 @@
         InstructionSetHasGenericJniStub(instruction_set_)) {
       // Leaving this empty will trigger the generic JNI version
     } else {
-      compiled_method = compiler_->JniCompile(access_flags, method_idx, dex_file);
-      CHECK(compiled_method != nullptr);
+      if (instruction_set_ != kMips64) {  // Use generic JNI for Mips64 (temporarily).
+        compiled_method = compiler_->JniCompile(access_flags, method_idx, dex_file);
+        CHECK(compiled_method != nullptr);
+      }
     }
   } else if ((access_flags & kAccAbstract) != 0) {
     // Abstract methods don't have code.
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 24b6f17..f949667 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -437,6 +437,10 @@
   // Get memory usage during compilation.
   std::string GetMemoryUsageString(bool extended) const;
 
+  void SetHadHardVerifierFailure() {
+    had_hard_verifier_failure_ = true;
+  }
+
  private:
   // These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
   // The only external contract is that unresolved method has flags 0 and resolved non-0.
@@ -575,6 +579,8 @@
   // included in the image.
   std::unique_ptr<std::set<std::string>> classes_to_compile_;
 
+  bool had_hard_verifier_failure_;
+
   size_t thread_count_;
 
   class AOTCompilationStats;
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 09ec9a2..e436f52 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -30,6 +30,7 @@
       generate_gdb_information_(false),
       include_patch_information_(kDefaultIncludePatchInformation),
       top_k_profile_threshold_(kDefaultTopKProfileThreshold),
+      debuggable_(false),
       include_debug_symbols_(kDefaultIncludeDebugSymbols),
       implicit_null_checks_(true),
       implicit_so_checks_(true),
@@ -37,6 +38,7 @@
       compile_pic_(false),
       verbose_methods_(nullptr),
       pass_manager_options_(new PassManagerOptions),
+      abort_on_hard_verifier_failure_(false),
       init_failure_output_(nullptr) {
 }
 
@@ -49,6 +51,7 @@
                                  bool generate_gdb_information,
                                  bool include_patch_information,
                                  double top_k_profile_threshold,
+                                 bool debuggable,
                                  bool include_debug_symbols,
                                  bool implicit_null_checks,
                                  bool implicit_so_checks,
@@ -56,7 +59,8 @@
                                  bool compile_pic,
                                  const std::vector<std::string>* verbose_methods,
                                  PassManagerOptions* pass_manager_options,
-                                 std::ostream* init_failure_output
+                                 std::ostream* init_failure_output,
+                                 bool abort_on_hard_verifier_failure
                                  ) :  // NOLINT(whitespace/parens)
     compiler_filter_(compiler_filter),
     huge_method_threshold_(huge_method_threshold),
@@ -67,6 +71,7 @@
     generate_gdb_information_(generate_gdb_information),
     include_patch_information_(include_patch_information),
     top_k_profile_threshold_(top_k_profile_threshold),
+    debuggable_(debuggable),
     include_debug_symbols_(include_debug_symbols),
     implicit_null_checks_(implicit_null_checks),
     implicit_so_checks_(implicit_so_checks),
@@ -74,6 +79,7 @@
     compile_pic_(compile_pic),
     verbose_methods_(verbose_methods),
     pass_manager_options_(pass_manager_options),
+    abort_on_hard_verifier_failure_(abort_on_hard_verifier_failure),
     init_failure_output_(init_failure_output) {
 }
 
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 0683d18..5042c75 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -62,6 +62,7 @@
                   bool generate_gdb_information,
                   bool include_patch_information,
                   double top_k_profile_threshold,
+                  bool debuggable,
                   bool include_debug_symbols,
                   bool implicit_null_checks,
                   bool implicit_so_checks,
@@ -69,7 +70,8 @@
                   bool compile_pic,
                   const std::vector<std::string>* verbose_methods,
                   PassManagerOptions* pass_manager_options,
-                  std::ostream* init_failure_output);
+                  std::ostream* init_failure_output,
+                  bool abort_on_hard_verifier_failure);
 
   CompilerFilter GetCompilerFilter() const {
     return compiler_filter_;
@@ -128,6 +130,10 @@
     return top_k_profile_threshold_;
   }
 
+  bool GetDebuggable() const {
+    return debuggable_;
+  }
+
   bool GetIncludeDebugSymbols() const {
     return include_debug_symbols_;
   }
@@ -178,6 +184,10 @@
     return pass_manager_options_.get();
   }
 
+  bool AbortOnHardVerifierFailure() const {
+    return abort_on_hard_verifier_failure_;
+  }
+
  private:
   CompilerFilter compiler_filter_;
   const size_t huge_method_threshold_;
@@ -189,6 +199,7 @@
   const bool include_patch_information_;
   // When using a profile file only the top K% of the profiled samples will be compiled.
   const double top_k_profile_threshold_;
+  const bool debuggable_;
   const bool include_debug_symbols_;
   const bool implicit_null_checks_;
   const bool implicit_so_checks_;
@@ -200,6 +211,10 @@
 
   std::unique_ptr<PassManagerOptions> pass_manager_options_;
 
+  // Abort compilation with an error if we find a class that fails verification with a hard
+  // failure.
+  const bool abort_on_hard_verifier_failure_;
+
   // Log initialization of initialization failures to this stream if not null.
   std::ostream* const init_failure_output_;
 
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 0283791..04efa21 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -72,6 +72,7 @@
       false,
       false,
       CompilerOptions::kDefaultTopKProfileThreshold,
+      false,  // TODO: Think about debuggability of JIT-compiled code.
       false,
       false,
       false,
@@ -79,7 +80,8 @@
       false,  // pic
       nullptr,
       pass_manager_options,
-      nullptr));
+      nullptr,
+      false));
   const InstructionSet instruction_set = kRuntimeISA;
   instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
   cumulative_logger_.reset(new CumulativeLogger("jit times"));
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 95c2d40..d25acc7 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -20,6 +20,7 @@
 #include "jni/quick/arm/calling_convention_arm.h"
 #include "jni/quick/arm64/calling_convention_arm64.h"
 #include "jni/quick/mips/calling_convention_mips.h"
+#include "jni/quick/mips64/calling_convention_mips64.h"
 #include "jni/quick/x86/calling_convention_x86.h"
 #include "jni/quick/x86_64/calling_convention_x86_64.h"
 #include "utils.h"
@@ -38,6 +39,8 @@
       return new arm64::Arm64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
     case kMips:
       return new mips::MipsManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+    case kMips64:
+      return new mips64::Mips64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
     case kX86:
       return new x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
     case kX86_64:
@@ -111,6 +114,8 @@
       return new arm64::Arm64JniCallingConvention(is_static, is_synchronized, shorty);
     case kMips:
       return new mips::MipsJniCallingConvention(is_static, is_synchronized, shorty);
+    case kMips64:
+      return new mips64::Mips64JniCallingConvention(is_static, is_synchronized, shorty);
     case kX86:
       return new x86::X86JniCallingConvention(is_static, is_synchronized, shorty);
     case kX86_64:
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index ba73828..2d9e03a 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -36,6 +36,7 @@
 #include "utils/arm/managed_register_arm.h"
 #include "utils/arm64/managed_register_arm64.h"
 #include "utils/mips/managed_register_mips.h"
+#include "utils/mips64/managed_register_mips64.h"
 #include "utils/x86/managed_register_x86.h"
 #include "thread.h"
 
@@ -329,7 +330,8 @@
   // 11. Save return value
   FrameOffset return_save_location = main_jni_conv->ReturnValueSaveLocation();
   if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
-    if (instruction_set == kMips && main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
+    if ((instruction_set == kMips || instruction_set == kMips64) &&
+        main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
         return_save_location.Uint32Value() % 8 != 0) {
       // Ensure doubles are 8-byte aligned for MIPS
       return_save_location = FrameOffset(return_save_location.Uint32Value() + kMipsPointerSize);
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
new file mode 100644
index 0000000..17325d6
--- /dev/null
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_mips64.h"
+
+#include "base/logging.h"
+#include "handle_scope-inl.h"
+#include "utils/mips64/managed_register_mips64.h"
+
+namespace art {
+namespace mips64 {
+
+static const GpuRegister kGpuArgumentRegisters[] = {
+  A0, A1, A2, A3, A4, A5, A6, A7
+};
+
+static const FpuRegister kFpuArgumentRegisters[] = {
+  F12, F13, F14, F15, F16, F17, F18, F19
+};
+
+// Calling convention
+ManagedRegister Mips64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+  return Mips64ManagedRegister::FromGpuRegister(T9);
+}
+
+ManagedRegister Mips64JniCallingConvention::InterproceduralScratchRegister() {
+  return Mips64ManagedRegister::FromGpuRegister(T9);
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
+  if (shorty[0] == 'F' || shorty[0] == 'D') {
+    return Mips64ManagedRegister::FromFpuRegister(F0);
+  } else if (shorty[0] == 'V') {
+    return Mips64ManagedRegister::NoRegister();
+  } else {
+    return Mips64ManagedRegister::FromGpuRegister(V0);
+  }
+}
+
+ManagedRegister Mips64ManagedRuntimeCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister Mips64JniCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister Mips64JniCallingConvention::IntReturnRegister() {
+  return Mips64ManagedRegister::FromGpuRegister(V0);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister Mips64ManagedRuntimeCallingConvention::MethodRegister() {
+  return Mips64ManagedRegister::FromGpuRegister(A0);
+}
+
+bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+  return false;  // Everything moved to stack on entry.
+}
+
+bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+  return true;
+}
+
+ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() {
+  LOG(FATAL) << "Should not reach here";
+  return ManagedRegister::NoRegister();
+}
+
+FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  CHECK(IsCurrentParamOnStack());
+  FrameOffset result =
+      FrameOffset(displacement_.Int32Value() +                 // displacement
+                  sizeof(StackReference<mirror::ArtMethod>) +  // Method ref
+                  (itr_slots_ * sizeof(uint32_t)));            // offset into in args
+  return result;
+}
+
+const ManagedRegisterEntrySpills& Mips64ManagedRuntimeCallingConvention::EntrySpills() {
+  // We spill the argument registers on MIPS64 to free them up for scratch use;
+  // we then assume all arguments are on the stack.
+  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
+    int reg_index = 1;   // We start from A1; A0 holds ArtMethod*.
+
+    // We need to choose the correct register size since the managed
+    // stack uses 32-bit stack slots.
+    ResetIterator(FrameOffset(0));
+    while (HasNext()) {
+      if (reg_index < 8) {
+        if (IsCurrentParamAFloatOrDouble()) {  // FP regs.
+          FpuRegister arg = kFpuArgumentRegisters[reg_index];
+          Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(arg);
+          entry_spills_.push_back(reg, IsCurrentParamADouble() ? 8 : 4);
+        } else {  // GP regs.
+          GpuRegister arg = kGpuArgumentRegisters[reg_index];
+          Mips64ManagedRegister reg = Mips64ManagedRegister::FromGpuRegister(arg);
+          entry_spills_.push_back(reg,
+                                  (IsCurrentParamALong() && (!IsCurrentParamAReference())) ? 8 : 4);
+        }
+        // E.g. A1, A2, F15, A4, F17, F18, A7: one index advances across both register banks.
+        reg_index++;
+      }
+
+      Next();
+    }
+  }
+  return entry_spills_;
+}
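
(A hedged trace of the spill loop above, for a hypothetical static method long f(long, float, int), shorty "JJFI". reg_index starts at 1 because A0 carries the ArtMethod*, and the single index advances over both register files.)

    arg 1 (long,  reg_index 1) -> kGpuArgumentRegisters[1] == A1,  spilled as 8 bytes
    arg 2 (float, reg_index 2) -> kFpuArgumentRegisters[2] == F14, spilled as 4 bytes
    arg 3 (int,   reg_index 3) -> kGpuArgumentRegisters[3] == A3,  spilled as 4 bytes
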
+
+// JNI calling convention
+
+Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized,
+                                                       const char* shorty)
+    : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S0));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S1));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S2));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S3));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S4));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S5));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S6));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S7));
+
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(GP));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(SP));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S8));
+}
+
+uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
+  // Compute spill mask to agree with callee saves initialized in the constructor.
+  uint32_t result = 1 << S0 | 1 << S1 | 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 |
+                    1 << S7 | 1 << GP | 1 << SP | 1 << S8;
+  return result;
+}
+
+ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const {
+  return Mips64ManagedRegister::FromGpuRegister(AT);
+}
+
+size_t Mips64JniCallingConvention::FrameSize() {
+  // Method* and callee save area size, local reference segment state.
+  size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+      CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
+  // References plus 2 words for HandleScope header
+  size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+  // Plus return value spill area size
+  return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t Mips64JniCallingConvention::OutArgSize() {
+  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
+}
+
+bool Mips64JniCallingConvention::IsCurrentParamInRegister() {
+  return itr_args_ < 8;
+}
+
+bool Mips64JniCallingConvention::IsCurrentParamOnStack() {
+  return !IsCurrentParamInRegister();
+}
+
+ManagedRegister Mips64JniCallingConvention::CurrentParamRegister() {
+  CHECK(IsCurrentParamInRegister());
+  if (IsCurrentParamAFloatOrDouble()) {
+    return Mips64ManagedRegister::FromFpuRegister(kFpuArgumentRegisters[itr_args_]);
+  } else {
+    return Mips64ManagedRegister::FromGpuRegister(kGpuArgumentRegisters[itr_args_]);
+  }
+}
+
+FrameOffset Mips64JniCallingConvention::CurrentParamStackOffset() {
+  CHECK(IsCurrentParamOnStack());
+  size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_args_ - 8) * kFramePointerSize);
+  CHECK_LT(offset, OutArgSize());
+  return FrameOffset(offset);
+}
+
+size_t Mips64JniCallingConvention::NumberOfOutgoingStackArgs() {
+  // all arguments including JNI args
+  size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();
+
+  // Nothing on the stack unless there are more than 8 arguments
+  return (all_args > 8) ? all_args - 8 : 0;
+}
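
(A minimal sketch of the arithmetic above, assuming -- as for static natives -- that the JNI extras are the JNIEnv* and the jclass; anything beyond the first 8 combined arguments goes to the out-args area.)

    #include <cstddef>
    size_t OutgoingStackArgs(size_t num_args, size_t jni_extras) {
      size_t all_args = num_args + jni_extras;  // Mirrors NumberOfOutgoingStackArgs().
      return all_args > 8 ? all_args - 8 : 0;
    }
    // E.g. a static native with 9 declared args has 11 in total (JNIEnv*, jclass,
    // then the 9 args): 8 travel in registers and 3 spill to the stack.
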
+
+}  // namespace mips64
+}  // namespace art
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
new file mode 100644
index 0000000..dc9273b
--- /dev/null
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
+#define ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace mips64 {
+
+constexpr size_t kFramePointerSize = 8;
+
+class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+ public:
+  Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+  ~Mips64ManagedRuntimeCallingConvention() OVERRIDE {}
+  // Calling convention
+  ManagedRegister ReturnRegister() OVERRIDE;
+  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  // Managed runtime calling convention
+  ManagedRegister MethodRegister() OVERRIDE;
+  bool IsCurrentParamInRegister() OVERRIDE;
+  bool IsCurrentParamOnStack() OVERRIDE;
+  ManagedRegister CurrentParamRegister() OVERRIDE;
+  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+
+ private:
+  ManagedRegisterEntrySpills entry_spills_;
+
+  DISALLOW_COPY_AND_ASSIGN(Mips64ManagedRuntimeCallingConvention);
+};
+
+class Mips64JniCallingConvention FINAL : public JniCallingConvention {
+ public:
+  explicit Mips64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  ~Mips64JniCallingConvention() OVERRIDE {}
+  // Calling convention
+  ManagedRegister ReturnRegister() OVERRIDE;
+  ManagedRegister IntReturnRegister() OVERRIDE;
+  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  // JNI calling convention
+  size_t FrameSize() OVERRIDE;
+  size_t OutArgSize() OVERRIDE;
+  const std::vector<ManagedRegister>& CalleeSaveRegisters() const OVERRIDE {
+    return callee_save_regs_;
+  }
+  ManagedRegister ReturnScratchRegister() const OVERRIDE;
+  uint32_t CoreSpillMask() const OVERRIDE;
+  uint32_t FpSpillMask() const OVERRIDE {
+    return 0;  // Floats aren't spilled in JNI down call
+  }
+  bool IsCurrentParamInRegister() OVERRIDE;
+  bool IsCurrentParamOnStack() OVERRIDE;
+  ManagedRegister CurrentParamRegister() OVERRIDE;
+  FrameOffset CurrentParamStackOffset() OVERRIDE;
+
+  // Mips64 does not need to extend small return types.
+  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+    return false;
+  }
+
+ protected:
+  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+
+ private:
+  // TODO: these values aren't unique and can be shared amongst instances
+  std::vector<ManagedRegister> callee_save_regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(Mips64JniCallingConvention);
+};
+
+}  // namespace mips64
+}  // namespace art
+
+#endif  // ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 4ca3648..1d16794 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -944,6 +944,10 @@
 };
 
 void BoundsCheckElimination::Run() {
+  if (!graph_->HasArrayAccesses()) {
+    return;
+  }
+
   BCEVisitor visitor(graph_);
   // Reverse post order guarantees a node's dominators are visited first.
   // We want to visit in the dominator-based order since if a value is known to
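
The early return makes the whole pass free for methods that never touch an array: the builder records the fact once while constructing the graph (see builder.cc below), and Run() consults a single flag instead of walking every block. The pattern, reduced to a self-contained sketch with an illustrative Graph type standing in for HGraph:

    class Graph {  // illustrative stand-in for HGraph
     public:
      bool HasArrayAccesses() const { return has_array_accesses_; }
      void SetHasArrayAccesses(bool value) { has_array_accesses_ = value; }
     private:
      bool has_array_accesses_ = false;
    };

    void RunBoundsCheckElimination(Graph* graph) {
      if (!graph->HasArrayAccesses()) {
        return;  // No array get/set was ever built: nothing to eliminate.
      }
      // ... visit blocks in reverse post order and drop provably safe checks ...
    }
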
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index a298413..24fa583 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -43,6 +43,7 @@
   ArenaAllocator allocator(&pool);
 
   HGraph* graph = new (&allocator) HGraph(&allocator);
+  graph->SetHasArrayAccesses(true);
 
   HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
   graph->AddBlock(entry);
@@ -148,6 +149,7 @@
   ArenaAllocator allocator(&pool);
 
   HGraph* graph = new (&allocator) HGraph(&allocator);
+  graph->SetHasArrayAccesses(true);
 
   HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
   graph->AddBlock(entry);
@@ -221,6 +223,7 @@
   ArenaAllocator allocator(&pool);
 
   HGraph* graph = new (&allocator) HGraph(&allocator);
+  graph->SetHasArrayAccesses(true);
 
   HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
   graph->AddBlock(entry);
@@ -294,6 +297,7 @@
   ArenaAllocator allocator(&pool);
 
   HGraph* graph = new (&allocator) HGraph(&allocator);
+  graph->SetHasArrayAccesses(true);
 
   HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
   graph->AddBlock(entry);
@@ -369,6 +373,7 @@
                               int increment,
                               IfCondition cond = kCondGE) {
   HGraph* graph = new (allocator) HGraph(allocator);
+  graph->SetHasArrayAccesses(true);
 
   HBasicBlock* entry = new (allocator) HBasicBlock(graph);
   graph->AddBlock(entry);
@@ -507,6 +512,7 @@
                               int increment = -1,
                               IfCondition cond = kCondLE) {
   HGraph* graph = new (allocator) HGraph(allocator);
+  graph->SetHasArrayAccesses(true);
 
   HBasicBlock* entry = new (allocator) HBasicBlock(graph);
   graph->AddBlock(entry);
@@ -640,6 +646,7 @@
                               int increment,
                               IfCondition cond) {
   HGraph* graph = new (allocator) HGraph(allocator);
+  graph->SetHasArrayAccesses(true);
 
   HBasicBlock* entry = new (allocator) HBasicBlock(graph);
   graph->AddBlock(entry);
@@ -752,6 +759,7 @@
                               int initial,
                               IfCondition cond = kCondGE) {
   HGraph* graph = new (allocator) HGraph(allocator);
+  graph->SetHasArrayAccesses(true);
 
   HBasicBlock* entry = new (allocator) HBasicBlock(graph);
   graph->AddBlock(entry);
@@ -879,6 +887,7 @@
   ArenaAllocator allocator(&pool);
 
   HGraph* graph = new (&allocator) HGraph(&allocator);
+  graph->SetHasArrayAccesses(true);
 
   HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
   graph->AddBlock(entry);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 20a1b03..2cac93d 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -840,6 +840,7 @@
     current_block_->AddInstruction(new (arena_) HArrayGet(object, index, anticipated_type));
     UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
   }
+  graph_->SetHasArrayAccesses(true);
 }
 
 void HGraphBuilder::BuildFilledNewArray(uint32_t dex_pc,
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index ba5f7d8..ed3f949 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -216,6 +216,29 @@
   }
 }
 
+void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
+  // The DCHECKs below check that a register is not specified twice in
+  // the summary. The out location can overlap with an input, so we need
+  // to special-case it.
+  if (location.IsRegister()) {
+    DCHECK(is_out || !blocked_core_registers_[location.reg()]);
+    blocked_core_registers_[location.reg()] = true;
+  } else if (location.IsFpuRegister()) {
+    DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
+    blocked_fpu_registers_[location.reg()] = true;
+  } else if (location.IsFpuRegisterPair()) {
+    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
+    blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
+    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
+    blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
+  } else if (location.IsRegisterPair()) {
+    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
+    blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
+    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
+    blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
+  }
+}
+
 void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
   LocationSummary* locations = instruction->GetLocations();
   if (locations == nullptr) return;
@@ -234,46 +257,19 @@
 
   // Mark all fixed input, temp and output registers as used.
   for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
-    Location loc = locations->InAt(i);
-    // The DCHECKS below check that a register is not specified twice in
-    // the summary.
-    if (loc.IsRegister()) {
-      DCHECK(!blocked_core_registers_[loc.reg()]);
-      blocked_core_registers_[loc.reg()] = true;
-    } else if (loc.IsFpuRegister()) {
-      DCHECK(!blocked_fpu_registers_[loc.reg()]);
-      blocked_fpu_registers_[loc.reg()] = true;
-    } else if (loc.IsFpuRegisterPair()) {
-      DCHECK(!blocked_fpu_registers_[loc.AsFpuRegisterPairLow<int>()]);
-      blocked_fpu_registers_[loc.AsFpuRegisterPairLow<int>()] = true;
-      DCHECK(!blocked_fpu_registers_[loc.AsFpuRegisterPairHigh<int>()]);
-      blocked_fpu_registers_[loc.AsFpuRegisterPairHigh<int>()] = true;
-    } else if (loc.IsRegisterPair()) {
-      DCHECK(!blocked_core_registers_[loc.AsRegisterPairLow<int>()]);
-      blocked_core_registers_[loc.AsRegisterPairLow<int>()] = true;
-      DCHECK(!blocked_core_registers_[loc.AsRegisterPairHigh<int>()]);
-      blocked_core_registers_[loc.AsRegisterPairHigh<int>()] = true;
-    }
+    BlockIfInRegister(locations->InAt(i));
   }
 
   for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
     Location loc = locations->GetTemp(i);
-    // The DCHECKS below check that a register is not specified twice in
-    // the summary.
-    if (loc.IsRegister()) {
-      DCHECK(!blocked_core_registers_[loc.reg()]);
-      blocked_core_registers_[loc.reg()] = true;
-    } else if (loc.IsFpuRegister()) {
-      DCHECK(!blocked_fpu_registers_[loc.reg()]);
-      blocked_fpu_registers_[loc.reg()] = true;
-    } else {
-      DCHECK(loc.GetPolicy() == Location::kRequiresRegister
-             || loc.GetPolicy() == Location::kRequiresFpuRegister);
-    }
+    BlockIfInRegister(loc);
+  }
+  Location result_location = locations->Out();
+  if (locations->OutputCanOverlapWithInputs()) {
+    BlockIfInRegister(result_location, /* is_out */ true);
   }
 
-  static constexpr bool kBaseline = true;
-  SetupBlockedRegisters(kBaseline);
+  SetupBlockedRegisters(/* is_baseline */ true);
 
   // Allocate all unallocated input locations.
   for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
@@ -318,7 +314,6 @@
       locations->SetTempAt(i, loc);
     }
   }
-  Location result_location = locations->Out();
   if (result_location.IsUnallocated()) {
     switch (result_location.GetPolicy()) {
       case Location::kAny:
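
The three hand-rolled copies of the blocking logic collapse into BlockIfInRegister(), and the one subtlety is the is_out parameter: an output is allowed to reuse an input's register, so the double-booking check is relaxed for it while the register still gets blocked. A reduced sketch of that behavior (the Location type and the blocked array are illustrative, and only the core-register case is shown):

    #include <cassert>

    struct Location {  // illustrative: a fixed core register, or -1 if unallocated
      int reg = -1;
      bool IsRegister() const { return reg >= 0; }
    };

    bool blocked_core_registers[16] = {};

    // Inputs and temps must not name a register twice; an output may alias an
    // input, so the assert is skipped for it, but the register is blocked
    // either way so nothing else gets allocated into it.
    void BlockIfInRegister(Location location, bool is_out = false) {
      if (location.IsRegister()) {
        assert(is_out || !blocked_core_registers[location.reg]);
        blocked_core_registers[location.reg] = true;
      }
    }
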
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index f46a36d..5146afa 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -378,6 +378,7 @@
   void InitLocationsBaseline(HInstruction* instruction);
   size_t GetStackOffsetOfSavedRegister(size_t index);
   void CompileInternal(CodeAllocator* allocator, bool is_baseline);
+  void BlockIfInRegister(Location location, bool is_out = false) const;
 
   HGraph* const graph_;
   const CompilerOptions& compiler_options_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3c8f62c..07d88de 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -37,14 +37,13 @@
 static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX, EBX };
 static constexpr size_t kRuntimeParameterCoreRegistersLength =
     arraysize(kRuntimeParameterCoreRegisters);
+static constexpr Register kCoreCalleeSaves[] = { EBP, ESI, EDI };
 static constexpr XmmRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1, XMM2, XMM3 };
 static constexpr size_t kRuntimeParameterFpuRegistersLength =
     arraysize(kRuntimeParameterFpuRegisters);
 
 static constexpr int kC2ConditionMask = 0x400;
 
-// Marker for places that can be updated once we don't follow the quick ABI.
-static constexpr bool kFollowsQuickABI = true;
 static constexpr int kFakeReturnRegister = Register(8);
 
 class InvokeRuntimeCallingConvention : public CallingConvention<Register, XmmRegister> {
@@ -79,7 +78,7 @@
  public:
   explicit NullCheckSlowPathX86(HNullCheck* instruction) : instruction_(instruction) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
     __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowNullPointer)));
     codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
@@ -94,7 +93,7 @@
  public:
   explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : instruction_(instruction) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
     __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowDivZero)));
     codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
@@ -109,7 +108,7 @@
  public:
   explicit DivRemMinusOneSlowPathX86(Register reg, bool is_div) : reg_(reg), is_div_(is_div) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
     if (is_div_) {
       __ negl(reg_);
@@ -134,7 +133,7 @@
         index_location_(index_location),
         length_location_(length_location) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
     // We're moving two locations to locations that could overlap, so we need a parallel
@@ -162,7 +161,7 @@
   explicit SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
       : instruction_(instruction), successor_(successor) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
     codegen->SaveLiveRegisters(instruction_->GetLocations());
@@ -193,7 +192,7 @@
  public:
   explicit LoadStringSlowPathX86(HLoadString* instruction) : instruction_(instruction) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -228,7 +227,7 @@
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
   }
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = at_->GetLocations();
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
@@ -281,7 +280,7 @@
         object_class_(object_class),
         dex_pc_(dex_pc) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -371,8 +370,15 @@
 }
 
 CodeGeneratorX86::CodeGeneratorX86(HGraph* graph, const CompilerOptions& compiler_options)
-    : CodeGenerator(graph, kNumberOfCpuRegisters, kNumberOfXmmRegisters,
-                    kNumberOfRegisterPairs, (1 << kFakeReturnRegister), 0, compiler_options),
+    : CodeGenerator(graph,
+                    kNumberOfCpuRegisters,
+                    kNumberOfXmmRegisters,
+                    kNumberOfRegisterPairs,
+                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
+                                        arraysize(kCoreCalleeSaves))
+                        | (1 << kFakeReturnRegister),
+                    0,
+                    compiler_options),
       block_labels_(graph->GetArena(), 0),
       location_builder_(graph, this),
       instruction_visitor_(graph, this),
@@ -427,18 +433,18 @@
   return Location();
 }
 
-void CodeGeneratorX86::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
+void CodeGeneratorX86::SetupBlockedRegisters(bool is_baseline) const {
   // Don't allocate the dalvik style register pair passing.
   blocked_register_pairs_[ECX_EDX] = true;
 
   // Stack register is always reserved.
   blocked_core_registers_[ESP] = true;
 
-  // TODO: We currently don't use Quick's callee saved registers.
-  DCHECK(kFollowsQuickABI);
-  blocked_core_registers_[EBP] = true;
-  blocked_core_registers_[ESI] = true;
-  blocked_core_registers_[EDI] = true;
+  if (is_baseline) {
+    blocked_core_registers_[EBP] = true;
+    blocked_core_registers_[ESI] = true;
+    blocked_core_registers_[EDI] = true;
+  }
 
   UpdateBlockedPairRegisters();
 }
@@ -470,15 +476,33 @@
     RecordPcInfo(nullptr, 0);
   }
 
-  if (!HasEmptyFrame()) {
-    __ subl(ESP, Immediate(GetFrameSize() - FrameEntrySpillSize()));
-    __ movl(Address(ESP, kCurrentMethodStackOffset), EAX);
+  if (HasEmptyFrame()) {
+    return;
   }
+
+  for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
+    Register reg = kCoreCalleeSaves[i];
+    if (allocated_registers_.ContainsCoreRegister(reg)) {
+      __ pushl(reg);
+    }
+  }
+
+  __ subl(ESP, Immediate(GetFrameSize() - FrameEntrySpillSize()));
+  __ movl(Address(ESP, kCurrentMethodStackOffset), EAX);
 }
 
 void CodeGeneratorX86::GenerateFrameExit() {
-  if (!HasEmptyFrame()) {
-    __ addl(ESP, Immediate(GetFrameSize() - FrameEntrySpillSize()));
+  if (HasEmptyFrame()) {
+    return;
+  }
+
+  __ addl(ESP, Immediate(GetFrameSize() - FrameEntrySpillSize()));
+
+  for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+    Register reg = kCoreCalleeSaves[i];
+    if (allocated_registers_.ContainsCoreRegister(reg)) {
+      __ popl(reg);
+    }
   }
 }
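
Now that optimized code may allocate EBP, ESI and EDI, the frame code has to preserve whichever of them were actually used: GenerateFrameEntry() walks kCoreCalleeSaves backwards and pushes, GenerateFrameExit() walks it forwards and pops, so the pops mirror the pushes in LIFO order. A toy model of that symmetry (the vector stands in for the machine stack):

    #include <cassert>
    #include <vector>

    enum Reg { EBP, ESI, EDI };
    constexpr Reg kCoreCalleeSaves[] = { EBP, ESI, EDI };

    int main() {
      bool allocated[3] = { true, false, true };  // say EBP and EDI were used
      std::vector<Reg> stack;

      // Entry: push in reverse array order (EDI first, EBP last).
      for (int i = 2; i >= 0; --i)
        if (allocated[i]) stack.push_back(kCoreCalleeSaves[i]);

      // Exit: pop in forward array order, so EBP (pushed last) comes off first.
      for (int i = 0; i <= 2; ++i)
        if (allocated[i]) {
          assert(stack.back() == kCoreCalleeSaves[i]);  // LIFO pairs up exactly
          stack.pop_back();
        }
      assert(stack.empty());
    }
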
 
@@ -907,7 +931,8 @@
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::Any());
   if (comp->NeedsMaterialization()) {
-    locations->SetOut(Location::RequiresRegister());
+    // We need a byte register.
+    locations->SetOut(Location::RegisterLocation(ECX));
   }
 }
 
@@ -1345,8 +1370,10 @@
         case Primitive::kPrimInt:
         case Primitive::kPrimChar:
           // Processing a Dex `int-to-byte' instruction.
-          locations->SetInAt(0, Location::Any());
-          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+          locations->SetInAt(0, Location::ByteRegisterOrConstant(ECX, conversion->InputAt(0)));
+          // Make the output overlap to please the register allocator. This greatly simplifies
+          // the validation of the linear scan implementation
+          locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
           break;
 
         default:
@@ -3161,15 +3188,16 @@
 }
 
 void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) {
+  // This location builder might end up asking for up to four registers, which
+  // is not currently possible for baseline. The situation in which we would
+  // need four registers cannot arise for baseline, though, because it never
+  // runs any optimizations.
+
   Primitive::Type value_type = instruction->GetComponentType();
   bool needs_write_barrier =
       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
 
-  DCHECK(kFollowsQuickABI);
-  bool not_enough_registers = needs_write_barrier
-      && !instruction->GetValue()->IsConstant()
-      && !instruction->GetIndex()->IsConstant();
-  bool needs_runtime_call = instruction->NeedsTypeCheck() || not_enough_registers;
+  bool needs_runtime_call = instruction->NeedsTypeCheck();
 
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
       instruction,
@@ -3389,7 +3417,7 @@
 void LocationsBuilderX86::VisitBoundsCheck(HBoundsCheck* instruction) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
   locations->SetInAt(1, Location::RequiresRegister());
   if (instruction->HasUses()) {
     locations->SetOut(Location::SameAsFirstInput());
@@ -3398,15 +3426,20 @@
 
 void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
   LocationSummary* locations = instruction->GetLocations();
-  SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(
-      instruction, locations->InAt(0), locations->InAt(1));
+  Location index_loc = locations->InAt(0);
+  Location length_loc = locations->InAt(1);
+  SlowPathCodeX86* slow_path =
+    new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(instruction, index_loc, length_loc);
   codegen_->AddSlowPath(slow_path);
 
-  Register index = locations->InAt(0).AsRegister<Register>();
-  Register length = locations->InAt(1).AsRegister<Register>();
-
-  __ cmpl(index, length);
-  __ j(kAboveEqual, slow_path->GetEntryLabel());
+  Register length = length_loc.AsRegister<Register>();
+  if (index_loc.IsConstant()) {
+    int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
+    __ cmpl(length, Immediate(value));
+  } else {
+    __ cmpl(length, index_loc.AsRegister<Register>());
+  }
+  __ j(kBelowEqual, slow_path->GetEntryLabel());
 }
 
 void LocationsBuilderX86::VisitTemporary(HTemporary* temp) {
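
Swapping the operands is what makes the constant case encodable: the assembler's cmpl takes an immediate only as its second operand, so the compare becomes length against index and the branch condition flips from above-or-equal to below-or-equal. The two forms are the same unsigned test, which is also why a negative index, reinterpreted as a huge unsigned value, still takes the slow path. A quick equivalence check:

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // Old: cmpl(index, length); j(kAboveEqual)  ==> slow path iff index >= length
    // New: cmpl(length, index); j(kBelowEqual)  ==> slow path iff length <= index
    bool OutOfBoundsOld(uint32_t index, uint32_t length) { return index >= length; }
    bool OutOfBoundsNew(uint32_t index, uint32_t length) { return length <= index; }

    int main() {
      const uint32_t length = 10;
      for (uint32_t index : {0u, 9u, 10u, 11u, static_cast<uint32_t>(-1)}) {
        assert(OutOfBoundsOld(index, length) == OutOfBoundsNew(index, length));
      }
      assert(OutOfBoundsNew(static_cast<uint32_t>(-1), length));  // index -1 is caught
    }

The same rewrite is applied to the x86-64 code generator in the next file.
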
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 6365bca..07ba95d 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -68,7 +68,7 @@
  public:
   explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : instruction_(instruction) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
     __ gs()->call(
         Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowNullPointer), true));
@@ -84,7 +84,7 @@
  public:
   explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : instruction_(instruction) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
     __ gs()->call(
         Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowDivZero), true));
@@ -101,7 +101,7 @@
   explicit DivRemMinusOneSlowPathX86_64(Register reg, Primitive::Type type, bool is_div)
       : cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
     if (type_ == Primitive::kPrimInt) {
       if (is_div_) {
@@ -133,7 +133,7 @@
   explicit SuspendCheckSlowPathX86_64(HSuspendCheck* instruction, HBasicBlock* successor)
       : instruction_(instruction), successor_(successor) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
     codegen->SaveLiveRegisters(instruction_->GetLocations());
@@ -169,7 +169,7 @@
         index_location_(index_location),
         length_location_(length_location) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
@@ -202,7 +202,7 @@
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
   }
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = at_->GetLocations();
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
@@ -249,7 +249,7 @@
  public:
   explicit LoadStringSlowPathX86_64(HLoadString* instruction) : instruction_(instruction) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -286,7 +286,7 @@
         object_class_(object_class),
         dex_pc_(dex_pc) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -3164,7 +3164,7 @@
 void LocationsBuilderX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
   locations->SetInAt(1, Location::RequiresRegister());
   if (instruction->HasUses()) {
     locations->SetOut(Location::SameAsFirstInput());
@@ -3173,15 +3173,20 @@
 
 void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
   LocationSummary* locations = instruction->GetLocations();
-  SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(
-      instruction, locations->InAt(0), locations->InAt(1));
+  Location index_loc = locations->InAt(0);
+  Location length_loc = locations->InAt(1);
+  SlowPathCodeX86_64* slow_path =
+    new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction, index_loc, length_loc);
   codegen_->AddSlowPath(slow_path);
 
-  CpuRegister index = locations->InAt(0).AsRegister<CpuRegister>();
-  CpuRegister length = locations->InAt(1).AsRegister<CpuRegister>();
-
-  __ cmpl(index, length);
-  __ j(kAboveEqual, slow_path->GetEntryLabel());
+  CpuRegister length = length_loc.AsRegister<CpuRegister>();
+  if (index_loc.IsConstant()) {
+    int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
+    __ cmpl(length, Immediate(value));
+  } else {
+    __ cmpl(length, index_loc.AsRegister<CpuRegister>());
+  }
+  __ j(kBelowEqual, slow_path->GetEntryLabel());
 }
 
 void CodeGeneratorX86_64::MarkGCCard(CpuRegister temp,
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index d55a3ca..b34957a 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -216,6 +216,10 @@
 
   callee_graph->InlineInto(graph_, invoke_instruction);
 
+  if (callee_graph->HasArrayAccesses()) {
+    graph_->SetHasArrayAccesses(true);
+  }
+
   // Now that we have inlined the callee, we need to update the next
   // instruction id of the caller, so that new instructions added
   // after optimizations get a unique id.
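
The flag only ever widens: if the inlined body touched an array, the caller's graph must say so too, or the early-out added to BoundsCheckElimination::Run() above would skip the bounds checks that inlining just copied in. Reduced to its essence (illustrative Graph type again):

    struct Graph {  // illustrative stand-in for HGraph
      bool has_array_accesses = false;
    };

    // Inlining may set the caller's flag but must never clear it.
    void PropagateArrayAccesses(Graph* caller, const Graph& callee) {
      caller->has_array_accesses |= callee.has_array_accesses;
    }
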
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 98076a0..8b56166 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -113,6 +113,7 @@
         number_of_vregs_(0),
         number_of_in_vregs_(0),
         temporaries_vreg_slots_(0),
+        has_array_accesses_(false),
         current_instruction_id_(start_instruction_id) {}
 
   ArenaAllocator* GetArena() const { return arena_; }
@@ -199,6 +200,14 @@
     return reverse_post_order_;
   }
 
+  bool HasArrayAccesses() const {
+    return has_array_accesses_;
+  }
+
+  void SetHasArrayAccesses(bool value) {
+    has_array_accesses_ = value;
+  }
+
   HNullConstant* GetNullConstant();
 
  private:
@@ -236,6 +245,9 @@
   // Number of vreg size slots that the temporaries use (used in baseline compiler).
   size_t temporaries_vreg_slots_;
 
+  // Whether the graph has any array accesses; if false, BCE can be skipped entirely.
+  bool has_array_accesses_;
+
   // The current id to assign to a newly added instruction. See HInstruction.id_.
   int32_t current_instruction_id_;
 
@@ -657,14 +669,14 @@
 #undef FORWARD_DECLARATION
 
 #define DECLARE_INSTRUCTION(type)                                       \
-  virtual InstructionKind GetKind() const { return k##type; }           \
-  virtual const char* DebugName() const { return #type; }               \
-  virtual const H##type* As##type() const OVERRIDE { return this; }     \
-  virtual H##type* As##type() OVERRIDE { return this; }                 \
-  virtual bool InstructionTypeEquals(HInstruction* other) const {       \
+  InstructionKind GetKind() const OVERRIDE { return k##type; }          \
+  const char* DebugName() const OVERRIDE { return #type; }              \
+  const H##type* As##type() const OVERRIDE { return this; }             \
+  H##type* As##type() OVERRIDE { return this; }                         \
+  bool InstructionTypeEquals(HInstruction* other) const OVERRIDE {      \
     return other->Is##type();                                           \
   }                                                                     \
-  virtual void Accept(HGraphVisitor* visitor)
+  void Accept(HGraphVisitor* visitor) OVERRIDE
 
 template <typename T> class HUseList;
 
@@ -1339,7 +1351,7 @@
       : HInstruction(side_effects), inputs_() {}
   virtual ~HTemplateInstruction() {}
 
-  virtual size_t InputCount() const { return N; }
+  size_t InputCount() const OVERRIDE { return N; }
 
  protected:
   const HUserRecord<HInstruction*> InputRecordAt(size_t i) const OVERRIDE { return inputs_[i]; }
@@ -1361,7 +1373,7 @@
       : HTemplateInstruction<N>(side_effects), type_(type) {}
   virtual ~HExpression() {}
 
-  virtual Primitive::Type GetType() const { return type_; }
+  Primitive::Type GetType() const OVERRIDE { return type_; }
 
  protected:
   Primitive::Type type_;
@@ -1373,7 +1385,7 @@
  public:
   HReturnVoid() : HTemplateInstruction(SideEffects::None()) {}
 
-  virtual bool IsControlFlow() const { return true; }
+  bool IsControlFlow() const OVERRIDE { return true; }
 
   DECLARE_INSTRUCTION(ReturnVoid);
 
@@ -1389,7 +1401,7 @@
     SetRawInputAt(0, value);
   }
 
-  virtual bool IsControlFlow() const { return true; }
+  bool IsControlFlow() const OVERRIDE { return true; }
 
   DECLARE_INSTRUCTION(Return);
 
@@ -1404,7 +1416,7 @@
  public:
   HExit() : HTemplateInstruction(SideEffects::None()) {}
 
-  virtual bool IsControlFlow() const { return true; }
+  bool IsControlFlow() const OVERRIDE { return true; }
 
   DECLARE_INSTRUCTION(Exit);
 
@@ -1466,8 +1478,8 @@
   HInstruction* GetInput() const { return InputAt(0); }
   Primitive::Type GetResultType() const { return GetType(); }
 
-  virtual bool CanBeMoved() const { return true; }
-  virtual bool InstructionDataEquals(HInstruction* other) const {
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     UNUSED(other);
     return true;
   }
@@ -1534,8 +1546,8 @@
     }
   }
 
-  virtual bool CanBeMoved() const { return true; }
-  virtual bool InstructionDataEquals(HInstruction* other) const {
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     UNUSED(other);
     return true;
   }
@@ -1588,16 +1600,16 @@
 
   bool IsCommutative() const OVERRIDE { return true; }
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x == y ? 1 : 0;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x == y ? 1 : 0;
   }
 
   DECLARE_INSTRUCTION(Equal);
 
-  virtual IfCondition GetCondition() const {
+  IfCondition GetCondition() const OVERRIDE {
     return kCondEQ;
   }
 
@@ -1612,16 +1624,16 @@
 
   bool IsCommutative() const OVERRIDE { return true; }
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x != y ? 1 : 0;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x != y ? 1 : 0;
   }
 
   DECLARE_INSTRUCTION(NotEqual);
 
-  virtual IfCondition GetCondition() const {
+  IfCondition GetCondition() const OVERRIDE {
     return kCondNE;
   }
 
@@ -1634,16 +1646,16 @@
   HLessThan(HInstruction* first, HInstruction* second)
       : HCondition(first, second) {}
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x < y ? 1 : 0;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x < y ? 1 : 0;
   }
 
   DECLARE_INSTRUCTION(LessThan);
 
-  virtual IfCondition GetCondition() const {
+  IfCondition GetCondition() const OVERRIDE {
     return kCondLT;
   }
 
@@ -1656,16 +1668,16 @@
   HLessThanOrEqual(HInstruction* first, HInstruction* second)
       : HCondition(first, second) {}
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x <= y ? 1 : 0;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x <= y ? 1 : 0;
   }
 
   DECLARE_INSTRUCTION(LessThanOrEqual);
 
-  virtual IfCondition GetCondition() const {
+  IfCondition GetCondition() const OVERRIDE {
     return kCondLE;
   }
 
@@ -1678,16 +1690,16 @@
   HGreaterThan(HInstruction* first, HInstruction* second)
       : HCondition(first, second) {}
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x > y ? 1 : 0;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x > y ? 1 : 0;
   }
 
   DECLARE_INSTRUCTION(GreaterThan);
 
-  virtual IfCondition GetCondition() const {
+  IfCondition GetCondition() const OVERRIDE {
     return kCondGT;
   }
 
@@ -1700,16 +1712,16 @@
   HGreaterThanOrEqual(HInstruction* first, HInstruction* second)
       : HCondition(first, second) {}
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x >= y ? 1 : 0;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x >= y ? 1 : 0;
   }
 
   DECLARE_INSTRUCTION(GreaterThanOrEqual);
 
-  virtual IfCondition GetCondition() const {
+  IfCondition GetCondition() const OVERRIDE {
     return kCondGE;
   }
 
@@ -1818,7 +1830,7 @@
  public:
   explicit HConstant(Primitive::Type type) : HExpression(type, SideEffects::None()) {}
 
-  virtual bool CanBeMoved() const { return true; }
+  bool CanBeMoved() const OVERRIDE { return true; }
 
   DECLARE_INSTRUCTION(Constant);
 
@@ -1832,12 +1844,12 @@
 
   float GetValue() const { return value_; }
 
-  virtual bool InstructionDataEquals(HInstruction* other) const {
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     return bit_cast<float, int32_t>(other->AsFloatConstant()->value_) ==
         bit_cast<float, int32_t>(value_);
   }
 
-  virtual size_t ComputeHashCode() const { return static_cast<size_t>(GetValue()); }
+  size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
 
   DECLARE_INSTRUCTION(FloatConstant);
 
@@ -1853,12 +1865,12 @@
 
   double GetValue() const { return value_; }
 
-  virtual bool InstructionDataEquals(HInstruction* other) const {
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     return bit_cast<double, int64_t>(other->AsDoubleConstant()->value_) ==
         bit_cast<double, int64_t>(value_);
   }
 
-  virtual size_t ComputeHashCode() const { return static_cast<size_t>(GetValue()); }
+  size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
 
   DECLARE_INSTRUCTION(DoubleConstant);
 
@@ -1919,11 +1931,11 @@
 
   int64_t GetValue() const { return value_; }
 
-  virtual bool InstructionDataEquals(HInstruction* other) const {
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     return other->AsLongConstant()->value_ == value_;
   }
 
-  virtual size_t ComputeHashCode() const { return static_cast<size_t>(GetValue()); }
+  size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
 
   DECLARE_INSTRUCTION(LongConstant);
 
@@ -1945,7 +1957,7 @@
 
 class HInvoke : public HInstruction {
  public:
-  virtual size_t InputCount() const { return inputs_.Size(); }
+  size_t InputCount() const OVERRIDE { return inputs_.Size(); }
 
   // Runtime needs to walk the stack, so Dex -> Dex calls need to
   // know their environment.
@@ -1955,7 +1967,7 @@
     SetRawInputAt(index, argument);
   }
 
-  virtual Primitive::Type GetType() const { return return_type_; }
+  Primitive::Type GetType() const OVERRIDE { return return_type_; }
 
   uint32_t GetDexPc() const { return dex_pc_; }
 
@@ -2123,8 +2135,8 @@
   explicit HNeg(Primitive::Type result_type, HInstruction* input)
       : HUnaryOperation(result_type, input) {}
 
-  virtual int32_t Evaluate(int32_t x) const OVERRIDE { return -x; }
-  virtual int64_t Evaluate(int64_t x) const OVERRIDE { return -x; }
+  int32_t Evaluate(int32_t x) const OVERRIDE { return -x; }
+  int64_t Evaluate(int64_t x) const OVERRIDE { return -x; }
 
   DECLARE_INSTRUCTION(Neg);
 
@@ -2170,12 +2182,12 @@
   HAdd(Primitive::Type result_type, HInstruction* left, HInstruction* right)
       : HBinaryOperation(result_type, left, right) {}
 
-  virtual bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const OVERRIDE { return true; }
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x + y;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x + y;
   }
 
@@ -2190,10 +2202,10 @@
   HSub(Primitive::Type result_type, HInstruction* left, HInstruction* right)
       : HBinaryOperation(result_type, left, right) {}
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x - y;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x - y;
   }
 
@@ -2208,10 +2220,10 @@
   HMul(Primitive::Type result_type, HInstruction* left, HInstruction* right)
       : HBinaryOperation(result_type, left, right) {}
 
-  virtual bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const OVERRIDE { return true; }
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const { return x * y; }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const { return x * y; }
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x * y; }
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x * y; }
 
   DECLARE_INSTRUCTION(Mul);
 
@@ -2224,14 +2236,14 @@
   HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
       : HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {}
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     // Our graph structure ensures we never have 0 for `y` during constant folding.
     DCHECK_NE(y, 0);
     // Special case -1 to avoid getting a SIGFPE on x86(_64).
     return (y == -1) ? -x : x / y;
   }
 
-  virtual int64_t Evaluate(int64_t x, int64_t y) const {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     DCHECK_NE(y, 0);
     // Special case -1 to avoid getting a SIGFPE on x86(_64).
     return (y == -1) ? -x : x / y;
@@ -2252,13 +2264,13 @@
   HRem(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
       : HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {}
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     DCHECK_NE(y, 0);
     // Special case -1 to avoid getting a SIGFPE on x86(_64).
     return (y == -1) ? 0 : x % y;
   }
 
-  virtual int64_t Evaluate(int64_t x, int64_t y) const {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     DCHECK_NE(y, 0);
     // Special case -1 to avoid getting a SIGFPE on x86(_64).
     return (y == -1) ? 0 : x % y;
@@ -2429,14 +2441,14 @@
   explicit HNot(Primitive::Type result_type, HInstruction* input)
       : HUnaryOperation(result_type, input) {}
 
-  virtual bool CanBeMoved() const { return true; }
-  virtual bool InstructionDataEquals(HInstruction* other) const {
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     UNUSED(other);
     return true;
   }
 
-  virtual int32_t Evaluate(int32_t x) const OVERRIDE { return ~x; }
-  virtual int64_t Evaluate(int64_t x) const OVERRIDE { return ~x; }
+  int32_t Evaluate(int32_t x) const OVERRIDE { return ~x; }
+  int64_t Evaluate(int64_t x) const OVERRIDE { return ~x; }
 
   DECLARE_INSTRUCTION(Not);
 
@@ -2760,15 +2772,15 @@
     SetRawInputAt(1, length);
   }
 
-  virtual bool CanBeMoved() const { return true; }
-  virtual bool InstructionDataEquals(HInstruction* other) const {
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     UNUSED(other);
     return true;
   }
 
-  virtual bool NeedsEnvironment() const { return true; }
+  bool NeedsEnvironment() const OVERRIDE { return true; }
 
-  virtual bool CanThrow() const { return true; }
+  bool CanThrow() const OVERRIDE { return true; }
 
   uint32_t GetDexPc() const { return dex_pc_; }
 
@@ -2812,7 +2824,7 @@
   explicit HSuspendCheck(uint32_t dex_pc)
       : HTemplateInstruction(SideEffects::None()), dex_pc_(dex_pc) {}
 
-  virtual bool NeedsEnvironment() const {
+  bool NeedsEnvironment() const OVERRIDE {
     return true;
   }
 
@@ -3340,7 +3352,7 @@
 
   // Visit functions that delegate to the super class.
 #define DECLARE_VISIT_INSTRUCTION(name, super)                                        \
-  virtual void Visit##name(H##name* instr) OVERRIDE { Visit##super(instr); }
+  void Visit##name(H##name* instr) OVERRIDE { Visit##super(instr); }
 
   FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
 
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 44a3da2..817a44b 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -36,7 +36,7 @@
     }
   }
 
-  virtual void EmitMove(size_t index) {
+  void EmitMove(size_t index) OVERRIDE {
     MoveOperands* move = moves_.Get(index);
     if (!message_.str().empty()) {
       message_ << " ";
@@ -48,7 +48,7 @@
     message_ << ")";
   }
 
-  virtual void EmitSwap(size_t index) {
+  void EmitSwap(size_t index) OVERRIDE {
     MoveOperands* move = moves_.Get(index);
     if (!message_.str().empty()) {
       message_ << " ";
@@ -60,8 +60,8 @@
     message_ << ")";
   }
 
-  virtual void SpillScratch(int reg ATTRIBUTE_UNUSED) {}
-  virtual void RestoreScratch(int reg ATTRIBUTE_UNUSED) {}
+  void SpillScratch(int reg ATTRIBUTE_UNUSED) OVERRIDE {}
+  void RestoreScratch(int reg ATTRIBUTE_UNUSED) OVERRIDE {}
 
   std::string GetMessage() const {
     return  message_.str();
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index 0f697fb..c28507c 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -33,12 +33,12 @@
   void Run();
 
  private:
-  virtual void VisitNullCheck(HNullCheck* check) OVERRIDE;
-  virtual void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE;
-  virtual void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
-  virtual void VisitBoundType(HBoundType* bound_type) OVERRIDE;
-  virtual void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
-  virtual void VisitCondition(HCondition* condition) OVERRIDE;
+  void VisitNullCheck(HNullCheck* check) OVERRIDE;
+  void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE;
+  void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
+  void VisitBoundType(HBoundType* bound_type) OVERRIDE;
+  void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
+  void VisitCondition(HCondition* condition) OVERRIDE;
 
   DISALLOW_COPY_AND_ASSIGN(PrepareForRegisterAllocation);
 };
diff --git a/compiler/optimizing/pretty_printer.h b/compiler/optimizing/pretty_printer.h
index d2a21c8..934514e 100644
--- a/compiler/optimizing/pretty_printer.h
+++ b/compiler/optimizing/pretty_printer.h
@@ -32,7 +32,7 @@
     PrintString(": ");
   }
 
-  virtual void VisitInstruction(HInstruction* instruction) {
+  void VisitInstruction(HInstruction* instruction) OVERRIDE {
     PrintPreInstruction(instruction);
     PrintString(instruction->DebugName());
     PrintPostInstruction(instruction);
@@ -68,7 +68,7 @@
     PrintNewLine();
   }
 
-  virtual void VisitBasicBlock(HBasicBlock* block) {
+  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
     PrintString("BasicBlock ");
     PrintInt(block->GetBlockId());
     const GrowableArray<HBasicBlock*>& predecessors = block->GetPredecessors();
@@ -106,15 +106,15 @@
   explicit StringPrettyPrinter(HGraph* graph)
       : HPrettyPrinter(graph), str_(""), current_block_(nullptr) { }
 
-  virtual void PrintInt(int value) {
+  void PrintInt(int value) OVERRIDE {
     str_ += StringPrintf("%d", value);
   }
 
-  virtual void PrintString(const char* value) {
+  void PrintString(const char* value) OVERRIDE {
     str_ += value;
   }
 
-  virtual void PrintNewLine() {
+  void PrintNewLine() OVERRIDE {
     str_ += '\n';
   }
 
@@ -122,12 +122,12 @@
 
   std::string str() const { return str_; }
 
-  virtual void VisitBasicBlock(HBasicBlock* block) {
+  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
     current_block_ = block;
     HPrettyPrinter::VisitBasicBlock(block);
   }
 
-  virtual void VisitGoto(HGoto* gota) {
+  void VisitGoto(HGoto* gota) OVERRIDE {
     PrintString("  ");
     PrintInt(gota->GetId());
     PrintString(": Goto ");
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 54e62a5..748ab22 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -1381,6 +1381,7 @@
                         : Location::StackSlot(interval->GetParent()->GetSpillSlot()));
   }
   UsePosition* use = current->GetFirstUse();
+  size_t safepoint_index = safepoints_.Size();
 
   // Walk over all siblings, updating locations of use positions, and
   // connecting them when they are adjacent.
@@ -1422,19 +1423,27 @@
     }
 
     // At each safepoint, we record stack and register information.
-    for (size_t i = 0, e = safepoints_.Size(); i < e; ++i) {
-      HInstruction* safepoint = safepoints_.Get(i);
+    // We iterate backwards to test safepoints in ascending order of positions,
+    // which is what LiveInterval::Covers is optimized for.
+    for (; safepoint_index > 0; --safepoint_index) {
+      HInstruction* safepoint = safepoints_.Get(safepoint_index - 1);
       size_t position = safepoint->GetLifetimePosition();
-      LocationSummary* locations = safepoint->GetLocations();
-      if (!current->Covers(position)) {
+
+      // Test that safepoints are ordered in the optimal way.
+      DCHECK(safepoint_index == safepoints_.Size()
+             || safepoints_.Get(safepoint_index)->GetLifetimePosition() <= position);
+
+      if (current->IsDeadAt(position)) {
+        break;
+      } else if (!current->Covers(position)) {
         continue;
-      }
-      if (interval->GetStart() == position) {
+      } else if (interval->GetStart() == position) {
         // The safepoint is for this instruction, so the location of the instruction
         // does not need to be saved.
         continue;
       }
 
+      LocationSummary* locations = safepoint->GetLocations();
       if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
         locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
       }
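
The rewrite depends on safepoints_ being sorted by descending lifetime position, which the new DCHECK verifies: walking the array from the back therefore yields ascending positions (the access pattern LiveInterval::Covers() is optimized for) and lets the loop break at the first position the interval is dead at instead of scanning the rest. Because safepoint_index is hoisted out of the sibling walk, later siblings also resume where earlier ones stopped. The traversal shape, stripped of the location bookkeeping (types illustrative):

    #include <cstddef>
    #include <vector>

    struct Interval {  // illustrative stand-in for LiveInterval
      size_t start, end;
      bool Covers(size_t p) const { return p >= start && p < end; }
      bool IsDeadAt(size_t p) const { return p >= end; }
    };

    // `positions` is sorted descending; iterating backwards visits ascending.
    size_t CountCoveredSafepoints(const std::vector<size_t>& positions,
                                  const Interval& interval) {
      size_t covered = 0;
      for (size_t i = positions.size(); i > 0; --i) {
        size_t position = positions[i - 1];
        if (interval.IsDeadAt(position)) break;  // all remaining are later still
        if (interval.Covers(position)) ++covered;
      }
      return covered;
    }
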
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 7fc1ec6..a05b38c9 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -32,15 +32,15 @@
  public:
   explicit SsaPrettyPrinter(HGraph* graph) : HPrettyPrinter(graph), str_("") {}
 
-  virtual void PrintInt(int value) {
+  void PrintInt(int value) OVERRIDE {
     str_ += StringPrintf("%d", value);
   }
 
-  virtual void PrintString(const char* value) {
+  void PrintString(const char* value) OVERRIDE {
     str_ += value;
   }
 
-  virtual void PrintNewLine() {
+  void PrintNewLine() OVERRIDE {
     str_ += '\n';
   }
 
@@ -48,7 +48,7 @@
 
   std::string str() const { return str_; }
 
-  virtual void VisitIntConstant(HIntConstant* constant) {
+  void VisitIntConstant(HIntConstant* constant) OVERRIDE {
     PrintPreInstruction(constant);
     str_ += constant->DebugName();
     str_ += " ";
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 744fb45..5b02510 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -31,13 +31,19 @@
   return true;
 }
 
+size_t ComputeDexRegisterMapSize(size_t number_of_dex_registers) {
+  return DexRegisterMap::kFixedSize
+      + number_of_dex_registers * DexRegisterMap::SingleEntrySize();
+}
+
 TEST(StackMapTest, Test1) {
   ArenaPool pool;
   ArenaAllocator arena(&pool);
   StackMapStream stream(&arena);
 
   ArenaBitVector sp_mask(&arena, 0, false);
-  stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, 2, 0);
+  size_t number_of_dex_registers = 2;
+  stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
   stream.AddDexRegisterEntry(DexRegisterMap::kInStack, 0);
   stream.AddDexRegisterEntry(DexRegisterMap::kConstant, -2);
 
@@ -56,17 +62,21 @@
   ASSERT_EQ(0u, stack_map.GetDexPc());
   ASSERT_EQ(64u, stack_map.GetNativePcOffset());
   ASSERT_EQ(0x3u, stack_map.GetRegisterMask());
-  ASSERT_FALSE(stack_map.HasInlineInfo());
 
   MemoryRegion stack_mask = stack_map.GetStackMask();
   ASSERT_TRUE(SameBits(stack_mask, sp_mask));
 
   ASSERT_TRUE(stack_map.HasDexRegisterMap());
-  DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, 2);
+  DexRegisterMap dex_registers =
+      code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+  ASSERT_EQ(16u, dex_registers.Size());
+  ASSERT_EQ(16u, ComputeDexRegisterMapSize(number_of_dex_registers));
   ASSERT_EQ(DexRegisterMap::kInStack, dex_registers.GetLocationKind(0));
   ASSERT_EQ(DexRegisterMap::kConstant, dex_registers.GetLocationKind(1));
   ASSERT_EQ(0, dex_registers.GetValue(0));
   ASSERT_EQ(-2, dex_registers.GetValue(1));
+
+  ASSERT_FALSE(stack_map.HasInlineInfo());
 }
 
 TEST(StackMapTest, Test2) {
@@ -77,7 +87,8 @@
   ArenaBitVector sp_mask1(&arena, 0, true);
   sp_mask1.SetBit(2);
   sp_mask1.SetBit(4);
-  stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, 2, 2);
+  size_t number_of_dex_registers = 2;
+  stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2);
   stream.AddDexRegisterEntry(DexRegisterMap::kInStack, 0);
   stream.AddDexRegisterEntry(DexRegisterMap::kConstant, -2);
   stream.AddInlineInfoEntry(42);
@@ -86,8 +97,9 @@
   ArenaBitVector sp_mask2(&arena, 0, true);
   sp_mask2.SetBit(3);
   sp_mask1.SetBit(8);
-  stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, 1, 0);
-  stream.AddDexRegisterEntry(DexRegisterMap::kInRegister, 0);
+  stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
+  stream.AddDexRegisterEntry(DexRegisterMap::kInRegister, 18);
+  stream.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, 3);
 
   size_t size = stream.ComputeNeededSize();
   void* memory = arena.Alloc(size, kArenaAllocMisc);
@@ -98,6 +110,7 @@
   ASSERT_EQ(1u, code_info.GetStackMaskSize());
   ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
 
+  // First stack map.
   StackMap stack_map = code_info.GetStackMapAt(0);
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
@@ -109,17 +122,22 @@
   ASSERT_TRUE(SameBits(stack_mask, sp_mask1));
 
   ASSERT_TRUE(stack_map.HasDexRegisterMap());
-  DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, 2);
+  DexRegisterMap dex_registers =
+      code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+  ASSERT_EQ(16u, dex_registers.Size());
+  ASSERT_EQ(16u, ComputeDexRegisterMapSize(number_of_dex_registers));
   ASSERT_EQ(DexRegisterMap::kInStack, dex_registers.GetLocationKind(0));
   ASSERT_EQ(DexRegisterMap::kConstant, dex_registers.GetLocationKind(1));
   ASSERT_EQ(0, dex_registers.GetValue(0));
   ASSERT_EQ(-2, dex_registers.GetValue(1));
 
+  ASSERT_TRUE(stack_map.HasInlineInfo());
   InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
   ASSERT_EQ(2u, inline_info.GetDepth());
   ASSERT_EQ(42u, inline_info.GetMethodReferenceIndexAtDepth(0));
   ASSERT_EQ(82u, inline_info.GetMethodReferenceIndexAtDepth(1));
 
+  // Second stack map.
   stack_map = code_info.GetStackMapAt(1);
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u)));
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u)));
@@ -130,6 +148,16 @@
   stack_mask = stack_map.GetStackMask();
   ASSERT_TRUE(SameBits(stack_mask, sp_mask2));
 
+  ASSERT_TRUE(stack_map.HasDexRegisterMap());
+  dex_registers =
+      code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+  ASSERT_EQ(16u, dex_registers.Size());
+  ASSERT_EQ(16u, ComputeDexRegisterMapSize(number_of_dex_registers));
+  ASSERT_EQ(DexRegisterMap::kInRegister, dex_registers.GetLocationKind(0));
+  ASSERT_EQ(DexRegisterMap::kInFpuRegister, dex_registers.GetLocationKind(1));
+  ASSERT_EQ(18, dex_registers.GetValue(0));
+  ASSERT_EQ(3, dex_registers.GetValue(1));
+
   ASSERT_FALSE(stack_map.HasInlineInfo());
 }
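
Working the new helper through the asserts above: DexRegisterMap::kFixedSize + 2 * DexRegisterMap::SingleEntrySize() must come out to the observed 16 bytes, so each test now checks the encoded map's size against the computed one rather than against a magic constant. The second stack map also gains a second entry (core register 18 and FPU register 3, arbitrary test values), where previously it held a single register entry and its map size went untested.
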
 
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index df7bb57..c3303e1 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -487,10 +487,12 @@
     // Profile file to use
     double top_k_profile_threshold = CompilerOptions::kDefaultTopKProfileThreshold;
 
+    bool debuggable = false;
     bool include_patch_information = CompilerOptions::kDefaultIncludePatchInformation;
     bool include_debug_symbols = kIsDebugBuild;
     bool watch_dog_enabled = true;
     bool generate_gdb_information = kIsDebugBuild;
+    bool abort_on_hard_verifier_error = false;
 
     PassManagerOptions pass_manager_options;
 
@@ -675,6 +677,8 @@
       } else if (option == "--no-include-debug-symbols" || option == "--strip-symbols") {
         include_debug_symbols = false;
         generate_gdb_information = false;  // Depends on debug symbols, see above.
+      } else if (option == "--debuggable") {
+        debuggable = true;
       } else if (option.starts_with("--profile-file=")) {
         profile_file_ = option.substr(strlen("--profile-file=")).data();
         VLOG(compiler) << "dex2oat: profile file is " << profile_file_;
@@ -729,6 +733,8 @@
         if (swap_fd_ < 0) {
           Usage("--swap-fd passed a negative value %d", swap_fd_);
         }
+      } else if (option == "--abort-on-hard-verifier-error") {
+        abort_on_hard_verifier_error = true;
       } else {
         Usage("Unknown argument %s", option.data());
       }
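
Both additions are plain boolean switches, so exercising them is just a matter of appending the flags, e.g. dex2oat --debuggable --abort-on-hard-verifier-error <usual arguments>. The first is threaded into CompilerOptions below; the second, as its name suggests, makes hard verification failures fatal to the dex2oat run instead of merely rejecting the offending method.
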
@@ -867,14 +873,11 @@
         // For R6, only interpreter mode is working.
         // TODO: fix compiler for Mips32r6.
         compiler_filter_string = "interpret-only";
-      } else if (instruction_set_ == kMips64) {
-        // For Mips64, can only compile in interpreter mode.
-        // TODO: fix compiler for Mips64.
-        compiler_filter_string = "interpret-only";
       } else {
         compiler_filter_string = "speed";
       }
     }
+
     CHECK(compiler_filter_string != nullptr);
     CompilerOptions::CompilerFilter compiler_filter = CompilerOptions::kDefaultCompilerFilter;
     if (strcmp(compiler_filter_string, "verify-none") == 0) {
@@ -915,6 +918,10 @@
         break;
     }
 
+    if (debuggable) {
+      // TODO: Consider adding CFI info and symbols here.
+    }
+
     compiler_options_.reset(new CompilerOptions(compiler_filter,
                                                 huge_method_threshold,
                                                 large_method_threshold,
@@ -924,6 +931,7 @@
                                                 generate_gdb_information,
                                                 include_patch_information,
                                                 top_k_profile_threshold,
+                                                debuggable,
                                                 include_debug_symbols,
                                                 implicit_null_checks,
                                                 implicit_so_checks,
@@ -933,7 +941,8 @@
                                                     nullptr :
                                                     &verbose_methods_,
                                                 new PassManagerOptions(pass_manager_options),
-                                                init_failure_output_.get()));
+                                                init_failure_output_.get(),
+                                                abort_on_hard_verifier_error));
 
     // Done with usage checks, enable watchdog if requested
     if (watch_dog_enabled) {
@@ -1635,9 +1644,10 @@
   void LogCompletionTime() {
     // Note: when creation of a runtime fails, e.g., when trying to compile an app but when there
     //       is no image, there won't be a Runtime::Current().
+    // Note: driver creation can fail when loading an invalid dex file.
     LOG(INFO) << "dex2oat took " << PrettyDuration(NanoTime() - start_ns_)
               << " (threads: " << thread_count_ << ") "
-              << ((Runtime::Current() != nullptr) ?
+              << ((Runtime::Current() != nullptr && driver_.get() != nullptr) ?
                   driver_->GetMemoryUsageString(kIsDebugBuild || VLOG_IS_ON(compiler)) :
                   "");
   }
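
The dex2oat changes above follow the file's existing parse-then-construct pattern: a boolean defaults to off, flips on an exact string match, and is finally threaded into the CompilerOptions constructor. A minimal sketch of that pattern, with simplified names (this is not the real dex2oat parser):

    #include <cstring>
    #include <string>

    struct Options {
      bool debuggable = false;
      bool abort_on_hard_verifier_error = false;
      std::string profile_file;
    };

    // Returns false on an unknown argument, mirroring the Usage() fallthrough.
    bool ParseOne(const std::string& option, Options* opts) {
      if (option == "--debuggable") {
        opts->debuggable = true;
      } else if (option == "--abort-on-hard-verifier-error") {
        opts->abort_on_hard_verifier_error = true;
      } else if (option.compare(0, strlen("--profile-file="), "--profile-file=") == 0) {
        opts->profile_file = option.substr(strlen("--profile-file="));
      } else {
        return false;
      }
      return true;
    }
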
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index 4ff44b4..d195efc 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -43,18 +43,15 @@
     const vixl::Instruction* instr,
     const vixl::CPURegister& reg) {
   USE(instr);
-  if (reg.IsRegister()) {
-    switch (reg.code()) {
-      case IP0: AppendToOutput(reg.Is64Bits() ? "ip0" : "wip0"); return;
-      case IP1: AppendToOutput(reg.Is64Bits() ? "ip1" : "wip1"); return;
-      case TR:  AppendToOutput(reg.Is64Bits() ? "tr"  :  "w18"); return;
-      case ETR: AppendToOutput(reg.Is64Bits() ? "etr" :  "w21"); return;
-      case FP:  AppendToOutput(reg.Is64Bits() ? "fp"  :  "w29"); return;
-      case LR:  AppendToOutput(reg.Is64Bits() ? "lr"  :  "w30"); return;
-      default:
-        // Fall through.
-        break;
+  if (reg.IsRegister() && reg.Is64Bits()) {
+    if (reg.code() == TR) {
+      AppendToOutput("tr");
+      return;
+    } else if (reg.code() == LR) {
+      AppendToOutput("lr");
+      return;
     }
+    // Fall through.
   }
   // Print other register names as usual.
   Disassembler::AppendRegisterNameToOutput(instr, reg);
@@ -105,13 +102,7 @@
 size_t DisassemblerArm64::Dump(std::ostream& os, const uint8_t* begin) {
   const vixl::Instruction* instr = reinterpret_cast<const vixl::Instruction*>(begin);
   decoder.Decode(instr);
-  // TODO: Use FormatInstructionPointer() once VIXL provides the appropriate
-  // features.
-  // VIXL does not yet allow remapping addresses disassembled. Using
-  // FormatInstructionPointer() would show incoherences between the instruction
-  // location addresses and the target addresses disassembled by VIXL (eg. for
-  // branch instructions).
-  os << StringPrintf("%p", instr)
+  os << FormatInstructionPointer(begin)
      << StringPrintf(": %08x\t%s\n", instr->InstructionBits(), disasm.GetOutput());
   return vixl::kInstructionSize;
 }
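
Replacing the raw %p with FormatInstructionPointer() lets the printed location be remapped (the header change below rebases code to 0 via MapCodeAddress), so instruction addresses and VIXL's branch-target rendering stay coherent. A sketch of the underlying idea, under assumed names rather than the Disassembler API:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Print an instruction address either absolutely or relative to a base;
    // relative printing is what base-address remapping provides.
    std::string FormatAddress(const uint8_t* addr, const uint8_t* base, bool absolute) {
      char buf[32];
      if (absolute) {
        snprintf(buf, sizeof(buf), "%p", static_cast<const void*>(addr));
      } else {
        snprintf(buf, sizeof(buf), "0x%08" PRIxPTR,
                 static_cast<uintptr_t>(addr - base));  // offset from code start
      }
      return buf;
    }
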
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index 57f11c8..3fb5c7f 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -30,8 +30,12 @@
 
 class CustomDisassembler FINAL : public vixl::Disassembler {
  public:
-  explicit CustomDisassembler(bool read_literals) :
-      vixl::Disassembler(), read_literals_(read_literals) {}
+  explicit CustomDisassembler(DisassemblerOptions* options) :
+      vixl::Disassembler(), read_literals_(options->can_read_literals_) {
+    if (!options->absolute_addresses_) {
+      MapCodeAddress(0, reinterpret_cast<const vixl::Instruction*>(options->base_address_));
+    }
+  }
 
   // Use register aliases in the disassembly.
   void AppendRegisterNameToOutput(const vixl::Instruction* instr,
@@ -55,11 +59,8 @@
 
 class DisassemblerArm64 FINAL : public Disassembler {
  public:
-  // TODO: Update this code once VIXL provides the ability to map code addresses
-  // to disassemble as a different address (the way FormatInstructionPointer()
-  // does).
   explicit DisassemblerArm64(DisassemblerOptions* options) :
-      Disassembler(options), disasm(options->can_read_literals_) {
+      Disassembler(options), disasm(options) {
     decoder.AppendVisitor(&disasm);
   }
 
diff --git a/disassembler/disassembler_mips64.cc b/disassembler/disassembler_mips64.cc
index 2d3239f..7b289d0 100644
--- a/disassembler/disassembler_mips64.cc
+++ b/disassembler/disassembler_mips64.cc
@@ -43,7 +43,7 @@
 static const uint32_t kITypeMask = (0x3f << kOpcodeShift);
 static const uint32_t kJTypeMask = (0x3f << kOpcodeShift);
 static const uint32_t kRTypeMask = ((0x3f << kOpcodeShift) | (0x3f));
-static const uint32_t kSpecial2Mask = (0x3f << kOpcodeShift);
+static const uint32_t kSpecial0Mask = (0x3f << kOpcodeShift);
 static const uint32_t kFpMask = kRTypeMask;
 
 static const Mips64Instruction gMips64Instructions[] = {
@@ -58,24 +58,15 @@
   { kRTypeMask, 4, "sllv", "DTS", },
   { kRTypeMask, 6, "srlv", "DTS", },
   { kRTypeMask, 7, "srav", "DTS", },
-  { kRTypeMask, 8, "jr", "S", },
-  // rd = 31 is implicit.
-  { kRTypeMask | (0x1f << 11), 9 | (31 << 11), "jalr", "S", },
+  { kRTypeMask | (0x1f << 11), 9 | (31 << 11), "jalr", "S", },  // rd = 31 is implicit.
+  { kRTypeMask | (0x1f << 11), 9, "jr", "S", },  // rd = 0 is implicit.
   { kRTypeMask, 9, "jalr", "DS", },  // General case.
-  { kRTypeMask | (0x1f << 6), 10, "movz", "DST", },
-  { kRTypeMask | (0x1f << 6), 11, "movn", "DST", },
   { kRTypeMask, 12, "syscall", "", },  // TODO: code
   { kRTypeMask, 13, "break", "", },  // TODO: code
   { kRTypeMask, 15, "sync", "", },  // TODO: type
-  { kRTypeMask, 16, "mfhi", "D", },
-  { kRTypeMask, 17, "mthi", "S", },
-  { kRTypeMask, 18, "mflo", "D", },
-  { kRTypeMask, 19, "mtlo", "S", },
-  { kRTypeMask, 24, "mult", "ST", },
-  { kRTypeMask, 25, "multu", "ST", },
-  { kRTypeMask, 26, "div", "ST", },
-  { kRTypeMask, 27, "divu", "ST", },
-  { kRTypeMask, 32, "add", "DST", },
+  { kRTypeMask, 20, "dsllv", "DTS", },
+  { kRTypeMask, 22, "dsrlv", "DTS", },
+  { kRTypeMask, 23, "dsrav", "DTS", },
   { kRTypeMask, 33, "addu", "DST", },
   { kRTypeMask, 34, "sub", "DST", },
   { kRTypeMask, 35, "subu", "DST", },
@@ -85,27 +76,37 @@
   { kRTypeMask, 39, "nor", "DST", },
   { kRTypeMask, 42, "slt", "DST", },
   { kRTypeMask, 43, "sltu", "DST", },
-  { kRTypeMask, 44, "dadd", "DST", },
   { kRTypeMask, 45, "daddu", "DST", },
   { kRTypeMask, 46, "dsub", "DST", },
   { kRTypeMask, 47, "dsubu", "DST", },
-  // 0, 48, tge
-  // 0, 49, tgeu
-  // 0, 50, tlt
-  // 0, 51, tltu
-  // 0, 52, teq
-  // 0, 54, tne
+  // TODO: seleqz, selnez
+  { kRTypeMask, 56, "dsll", "DTA", },
+  { kRTypeMask, 58, "dsrl", "DTA", },
+  { kRTypeMask, 59, "dsra", "DTA", },
+  { kRTypeMask, 60, "dsll32", "DTA", },
+  { kRTypeMask | (0x1f << 21), 62 | (1 << 21), "drotr32", "DTA", },
+  { kRTypeMask, 62, "dsrl32", "DTA", },
+  { kRTypeMask, 63, "dsra32", "DTA", },
 
-  // SPECIAL2
-  { kSpecial2Mask | 0x7ff, (28 << kOpcodeShift) | 2, "mul", "DST" },
-  { kSpecial2Mask | 0x7ff, (28 << kOpcodeShift) | 32, "clz", "DS" },
-  { kSpecial2Mask | 0x7ff, (28 << kOpcodeShift) | 36, "dclz", "DS" },
-  { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 0, "madd", "ST" },
-  { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 1, "maddu", "ST" },
-  { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 2, "mul", "DST" },
-  { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 4, "msub", "ST" },
-  { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 5, "msubu", "ST" },
-  { kSpecial2Mask | 0x3f, (28 << kOpcodeShift) | 0x3f, "sdbbp", "" },
+  // SPECIAL0
+  { kSpecial0Mask | 0x7ff, (2 << 6) | 24, "mul", "DST" },
+  { kSpecial0Mask | 0x7ff, (3 << 6) | 24, "muh", "DST" },
+  { kSpecial0Mask | 0x7ff, (2 << 6) | 25, "mulu", "DST" },
+  { kSpecial0Mask | 0x7ff, (3 << 6) | 25, "muhu", "DST" },
+  { kSpecial0Mask | 0x7ff, (2 << 6) | 26, "div", "DST" },
+  { kSpecial0Mask | 0x7ff, (3 << 6) | 26, "mod", "DST" },
+  { kSpecial0Mask | 0x7ff, (2 << 6) | 27, "divu", "DST" },
+  { kSpecial0Mask | 0x7ff, (3 << 6) | 27, "modu", "DST" },
+  { kSpecial0Mask | 0x7ff, (2 << 6) | 28, "dmul", "DST" },
+  { kSpecial0Mask | 0x7ff, (3 << 6) | 28, "dmuh", "DST" },
+  { kSpecial0Mask | 0x7ff, (2 << 6) | 29, "dmulu", "DST" },
+  { kSpecial0Mask | 0x7ff, (3 << 6) | 29, "dmuhu", "DST" },
+  { kSpecial0Mask | 0x7ff, (2 << 6) | 30, "ddiv", "DST" },
+  { kSpecial0Mask | 0x7ff, (3 << 6) | 30, "dmod", "DST" },
+  { kSpecial0Mask | 0x7ff, (2 << 6) | 31, "ddivu", "DST" },
+  { kSpecial0Mask | 0x7ff, (3 << 6) | 31, "dmodu", "DST" },
+  // TODO: [d]clz, [d]clo
+  // TODO: sdbbp
 
   // J-type instructions.
   { kJTypeMask, 2 << kOpcodeShift, "j", "L" },
@@ -116,33 +117,31 @@
   { kITypeMask, 5 << kOpcodeShift, "bne", "STB" },
   { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (1 << 16), "bgez", "SB" },
   { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (0 << 16), "bltz", "SB" },
-  { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (2 << 16), "bltzl", "SB" },
-  { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (16 << 16), "bltzal", "SB" },
-  { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (18 << 16), "bltzall", "SB" },
   { kITypeMask | (0x1f << 16), 6 << kOpcodeShift | (0 << 16), "blez", "SB" },
   { kITypeMask | (0x1f << 16), 7 << kOpcodeShift | (0 << 16), "bgtz", "SB" },
+  { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (6 << 16), "dahi", "Si", },
+  { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (30 << 16), "dati", "Si", },
 
   { 0xffff0000, (4 << kOpcodeShift), "b", "B" },
   { 0xffff0000, (1 << kOpcodeShift) | (17 << 16), "bal", "B" },
 
-  { kITypeMask, 8 << kOpcodeShift, "addi", "TSi", },
   { kITypeMask, 9 << kOpcodeShift, "addiu", "TSi", },
   { kITypeMask, 10 << kOpcodeShift, "slti", "TSi", },
   { kITypeMask, 11 << kOpcodeShift, "sltiu", "TSi", },
   { kITypeMask, 12 << kOpcodeShift, "andi", "TSi", },
   { kITypeMask, 13 << kOpcodeShift, "ori", "TSi", },
-  { kITypeMask, 14 << kOpcodeShift, "ori", "TSi", },
-  { kITypeMask, 15 << kOpcodeShift, "lui", "TI", },
-
-  { kITypeMask, 24 << kOpcodeShift, "daddi", "TSi", },
+  { kITypeMask, 14 << kOpcodeShift, "xori", "TSi", },
+  { kITypeMask | (0x1f << 21), 15 << kOpcodeShift, "lui", "TI", },
+  { kITypeMask, 15 << kOpcodeShift, "aui", "TSI", },
   { kITypeMask, 25 << kOpcodeShift, "daddiu", "TSi", },
-
+  { kITypeMask, 29 << kOpcodeShift, "daui", "TSi", },
 
   { kITypeMask, 32u << kOpcodeShift, "lb", "TO", },
   { kITypeMask, 33u << kOpcodeShift, "lh", "TO", },
   { kITypeMask, 35u << kOpcodeShift, "lw", "TO", },
   { kITypeMask, 36u << kOpcodeShift, "lbu", "TO", },
   { kITypeMask, 37u << kOpcodeShift, "lhu", "TO", },
+  { kITypeMask, 39u << kOpcodeShift, "lwu", "TO", },
   { kITypeMask, 40u << kOpcodeShift, "sb", "TO", },
   { kITypeMask, 41u << kOpcodeShift, "sh", "TO", },
   { kITypeMask, 43u << kOpcodeShift, "sw", "TO", },
@@ -154,27 +153,31 @@
   { kITypeMask, 63u << kOpcodeShift, "sd", "TO", },
 
   // Floating point.
-  { kFpMask,                kCop1 | 0, "add", "fdst" },
-  { kFpMask,                kCop1 | 1, "sub", "fdst" },
-  { kFpMask,                kCop1 | 2, "mul", "fdst" },
-  { kFpMask,                kCop1 | 3, "div", "fdst" },
-  { kFpMask | (0x1f << 16), kCop1 | 4, "sqrt", "fdst" },
-  { kFpMask | (0x1f << 16), kCop1 | 5, "abs", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 6, "mov", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 7, "neg", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 8, "round.l", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 9, "trunc.l", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 10, "ceil.l", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 11, "floor.l", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 12, "round.w", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 13, "trunc.w", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 14, "ceil.w", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 15, "floor.w", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 32, "cvt.s", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 33, "cvt.d", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 36, "cvt.w", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 37, "cvt.l", "fds" },
-  { kFpMask | (0x1f << 16), kCop1 | 38, "cvt.ps", "fds" },
+  { kFpMask | (0x1f << 21), kCop1 | (0x00 << 21), "mfc1", "Td" },
+  { kFpMask | (0x1f << 21), kCop1 | (0x01 << 21), "dmfc1", "Td" },
+  { kFpMask | (0x1f << 21), kCop1 | (0x04 << 21), "mtc1", "Td" },
+  { kFpMask | (0x1f << 21), kCop1 | (0x05 << 21), "dmtc1", "Td" },
+  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 0, "add", "fadt" },
+  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 1, "sub", "fadt" },
+  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 2, "mul", "fadt" },
+  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 3, "div", "fadt" },
+  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 4, "sqrt", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 5, "abs", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 6, "mov", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 7, "neg", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 8, "round.l", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 9, "trunc.l", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 10, "ceil.l", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 11, "floor.l", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 12, "round.w", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 13, "trunc.w", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 14, "ceil.w", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 15, "floor.w", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 32, "cvt.s", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 33, "cvt.d", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 36, "cvt.w", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 37, "cvt.l", "fad" },
+  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 38, "cvt.ps", "fad" },
 };
 
 static uint32_t ReadU32(const uint8_t* ptr) {
@@ -216,6 +219,7 @@
             break;
           case 'D': args << 'r' << rd; break;
           case 'd': args << 'f' << rd; break;
+          case 'a': args << 'f' << sa; break;
           case 'f':  // Floating point "fmt".
             {
               size_t fmt = (instruction >> 21) & 0x7;  // TODO: other fmts?
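
The table edits above all rely on the same first-match decode scheme: each row pairs a mask with a value, and an instruction matches the first row whose masked bits equal that value. That is why rows with more bits pinned (lui, with the rs field forced to zero) must precede more general ones (aui). A sketch of that matcher, with struct and table names assumed to mirror the file:

    #include <cstddef>
    #include <cstdint>

    struct Mips64Instruction {
      uint32_t mask;
      uint32_t value;
      const char* name;
      const char* args;
    };

    // First match wins, so ordering in the table encodes specificity.
    const Mips64Instruction* Lookup(const Mips64Instruction* table, size_t n,
                                    uint32_t instruction) {
      for (size_t i = 0; i < n; ++i) {
        if ((instruction & table[i].mask) == table[i].value) {
          return &table[i];
        }
      }
      return nullptr;  // unknown encoding
    }
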
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 6f1b826..8cb95f1 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -422,20 +422,120 @@
     move    $v1, $zero
 END art_quick_do_long_jump
 
-UNIMPLEMENTED art_quick_deliver_exception
-UNIMPLEMENTED art_quick_throw_null_pointer_exception
-UNIMPLEMENTED art_quick_throw_div_zero
-UNIMPLEMENTED art_quick_throw_array_bounds
-UNIMPLEMENTED art_quick_throw_stack_overflow
-UNIMPLEMENTED art_quick_throw_no_such_method
+    /*
+     * Called by managed code, saves most registers (forms basis of long jump
+     * context) and passes the bottom of the stack.
+     * artDeliverExceptionFromCode will place the callee save Method* at
+     * the bottom of the thread's stack. On entry $a0 holds Throwable*.
+     */
+ENTRY art_quick_deliver_exception
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    dla  $t9, artDeliverExceptionFromCode
+    jalr $zero, $t9                 # artDeliverExceptionFromCode(Throwable*, Thread*)
+    move $a1, rSELF                 # pass Thread::Current
+END art_quick_deliver_exception
 
-UNIMPLEMENTED art_quick_invoke_interface_trampoline
-UNIMPLEMENTED art_quick_invoke_interface_trampoline_with_access_check
+    /*
+     * Called by managed code to create and deliver a NullPointerException
+     */
+    .extern artThrowNullPointerExceptionFromCode
+ENTRY art_quick_throw_null_pointer_exception
+.Lart_quick_throw_null_pointer_exception_gp_set:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    dla  $t9, artThrowNullPointerExceptionFromCode
+    jalr $zero, $t9                 # artThrowNullPointerExceptionFromCode(Thread*)
+    move $a0, rSELF                 # pass Thread::Current
+END art_quick_throw_null_pointer_exception
 
-UNIMPLEMENTED art_quick_invoke_static_trampoline_with_access_check
-UNIMPLEMENTED art_quick_invoke_direct_trampoline_with_access_check
-UNIMPLEMENTED art_quick_invoke_super_trampoline_with_access_check
-UNIMPLEMENTED art_quick_invoke_virtual_trampoline_with_access_check
+    /*
+     * Called by managed code to create and deliver an ArithmeticException
+     */
+    .extern artThrowDivZeroFromCode
+ENTRY art_quick_throw_div_zero
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    dla  $t9, artThrowDivZeroFromCode
+    jalr $zero, $t9                 # artThrowDivZeroFromCode(Thread*)
+    move $a0, rSELF                 # pass Thread::Current
+END art_quick_throw_div_zero
+
+    /*
+     * Called by managed code to create and deliver an
+     * ArrayIndexOutOfBoundsException
+     */
+    .extern artThrowArrayBoundsFromCode
+ENTRY art_quick_throw_array_bounds
+.Lart_quick_throw_array_bounds_gp_set:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    dla  $t9, artThrowArrayBoundsFromCode
+    jalr $zero, $t9                 # artThrowArrayBoundsFromCode(index, limit, Thread*)
+    move $a2, rSELF                 # pass Thread::Current
+END art_quick_throw_array_bounds
+
+    /*
+     * Called by managed code to create and deliver a StackOverflowError.
+     */
+    .extern artThrowStackOverflowFromCode
+ENTRY art_quick_throw_stack_overflow
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    dla  $t9, artThrowStackOverflowFromCode
+    jalr $zero, $t9                 # artThrowStackOverflowFromCode(Thread*)
+    move $a0, rSELF                 # pass Thread::Current
+END art_quick_throw_stack_overflow
+
+    /*
+     * Called by managed code to create and deliver a NoSuchMethodError.
+     */
+    .extern artThrowNoSuchMethodFromCode
+ENTRY art_quick_throw_no_such_method
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    dla  $t9, artThrowNoSuchMethodFromCode
+    jalr $zero, $t9                 # artThrowNoSuchMethodFromCode(method_idx, Thread*)
+    move $a1, rSELF                 # pass Thread::Current
+END art_quick_throw_no_such_method
+
+    /*
+     * All generated callsites for interface invokes and invocation slow paths will load arguments
+     * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
+     * the method_idx.  This wrapper will save arg1-arg3, load the caller's Method*, align the
+     * stack and call the appropriate C helper.
+     * NOTE: "this" is first visable argument of the target, and so can be found in arg1/$a1.
+     *
+     * The helper will attempt to locate the target and return a 128-bit result in $v0/$v1 consisting
+     * of the target Method* in $v0 and method->code_ in $v1.
+     *
+     * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+     * thread and we branch to another stub to deliver it.
+     *
+     * On success this wrapper will restore arguments and *jump* to the target, leaving the ra
+     * pointing back to the original caller.
+     */
+.macro INVOKE_TRAMPOLINE c_name, cxx_name
+    .extern \cxx_name
+ENTRY \c_name
+    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME  # save callee saves in case allocation triggers GC
+    lwu   $a2, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE($sp)  # pass caller Method*
+    move  $a3, rSELF                       # pass Thread::Current
+    jal   \cxx_name                        # (method_idx, this, caller, Thread*, $sp)
+    move  $a4, $sp                         # pass $sp
+    move  $a0, $v0                         # save target Method*
+    move  $t9, $v1                         # save $v0->code_
+    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+    beq   $v0, $zero, 1f
+    nop
+    jalr  $zero, $t9
+    nop
+1:
+    DELIVER_PENDING_EXCEPTION
+END \c_name
+.endm
+
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
+
+INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
 
     # On entry:
     #   t0 = shorty
@@ -454,7 +554,7 @@
     li     $t9, 74               # put char 'J' into t9
     beq    $t9, $t3, 3f          # branch if result type char == 'J'
     nop
-    lwu    $\gpu, 0($t1)
+    lw     $\gpu, 0($t1)
     sw     $\gpu, 0($v0)
     daddiu $v0, 4
     daddiu $t1, 4
@@ -699,63 +799,534 @@
     sw    $v1, 4($a4)           # store the other half of the result
 END art_quick_invoke_static_stub
 
+    /*
+     * Entry from managed code that calls artHandleFillArrayDataFromCode and
+     * delivers exception on failure.
+     */
+    .extern artHandleFillArrayDataFromCode
+ENTRY art_quick_handle_fill_data
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case exception allocation triggers GC
+    lwu     $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal     artHandleFillArrayDataFromCode              # (payload offset, Array*, method, Thread*)
+    move    $a3, rSELF                                  # pass Thread::Current
+    RETURN_IF_ZERO
+END art_quick_handle_fill_data
 
+    /*
+     * Entry from managed code that calls artLockObjectFromCode, may block for GC.
+     */
+    .extern artLockObjectFromCode
+ENTRY art_quick_lock_object
+    beq     $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
+    nop
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case we block
+    jal     artLockObjectFromCode         # (Object* obj, Thread*)
+    move    $a1, rSELF                    # pass Thread::Current
+    RETURN_IF_ZERO
+END art_quick_lock_object
 
-UNIMPLEMENTED art_quick_handle_fill_data
-UNIMPLEMENTED art_quick_lock_object
-UNIMPLEMENTED art_quick_unlock_object
-UNIMPLEMENTED art_quick_check_cast
-UNIMPLEMENTED art_quick_aput_obj_with_null_and_bound_check
-UNIMPLEMENTED art_quick_aput_obj_with_bound_check
-UNIMPLEMENTED art_quick_aput_obj
-UNIMPLEMENTED art_quick_initialize_static_storage
-UNIMPLEMENTED art_quick_initialize_type
-UNIMPLEMENTED art_quick_initialize_type_and_verify_access
-UNIMPLEMENTED art_quick_get_boolean_static
-UNIMPLEMENTED art_quick_get_byte_static
-UNIMPLEMENTED art_quick_get_char_static
-UNIMPLEMENTED art_quick_get_short_static
-UNIMPLEMENTED art_quick_get32_static
-UNIMPLEMENTED art_quick_get64_static
-UNIMPLEMENTED art_quick_get_obj_static
-UNIMPLEMENTED art_quick_get_boolean_instance
-UNIMPLEMENTED art_quick_get_byte_instance
-UNIMPLEMENTED art_quick_get_char_instance
-UNIMPLEMENTED art_quick_get_short_instance
-UNIMPLEMENTED art_quick_get32_instance
-UNIMPLEMENTED art_quick_get64_instance
-UNIMPLEMENTED art_quick_get_obj_instance
-UNIMPLEMENTED art_quick_set8_static
-UNIMPLEMENTED art_quick_set16_static
-UNIMPLEMENTED art_quick_set32_static
-UNIMPLEMENTED art_quick_set64_static
-UNIMPLEMENTED art_quick_set_obj_static
-UNIMPLEMENTED art_quick_set8_instance
-UNIMPLEMENTED art_quick_set16_instance
-UNIMPLEMENTED art_quick_set32_instance
-UNIMPLEMENTED art_quick_set64_instance
-UNIMPLEMENTED art_quick_set_obj_instance
-UNIMPLEMENTED art_quick_resolve_string
+    /*
+     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
+     */
+    .extern artUnlockObjectFromCode
+ENTRY art_quick_unlock_object
+    beq     $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
+    nop
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case exception allocation triggers GC
+    jal     artUnlockObjectFromCode    # (Object* obj, Thread*)
+    move    $a1, rSELF                 # pass Thread::Current
+    RETURN_IF_ZERO
+END art_quick_unlock_object
+
+    /*
+     * Entry from managed code that calls artIsAssignableFromCode and, on failure, delivers a
+     * ClassCastException.
+     */
+    .extern artThrowClassCastException
+ENTRY art_quick_check_cast
+    daddiu $sp, $sp, -32
+    .cfi_adjust_cfa_offset 32
+    sd     $ra, 24($sp)
+    .cfi_rel_offset 31, 24
+    sd     $t9, 16($sp)
+    sd     $a1, 8($sp)
+    sd     $a0, 0($sp)
+    jal    artIsAssignableFromCode
+    nop
+    beq    $v0, $zero, .Lthrow_class_cast_exception
+    ld     $ra, 24($sp)
+    jalr   $zero, $ra
+    daddiu $sp, $sp, 32
+    .cfi_adjust_cfa_offset -32
+.Lthrow_class_cast_exception:
+    ld     $t9, 16($sp)
+    ld     $a1, 8($sp)
+    ld     $a0, 0($sp)
+    daddiu $sp, $sp, 32
+    .cfi_adjust_cfa_offset -32
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    dla  $t9, artThrowClassCastException
+    jalr $zero, $t9                 # artThrowClassCastException (Class*, Class*, Thread*)
+    move $a2, rSELF                 # pass Thread::Current
+END art_quick_check_cast
+
+    /*
+     * Entry from managed code for array put operations of objects where the value being stored
+     * needs to be checked for compatibility.
+     * a0 = array, a1 = index, a2 = value
+     */
+ENTRY art_quick_aput_obj_with_null_and_bound_check
+    bne    $a0, $zero, .Lart_quick_aput_obj_with_bound_check_gp_set
+    nop
+    b .Lart_quick_throw_null_pointer_exception_gp_set
+    nop
+END art_quick_aput_obj_with_null_and_bound_check
+
+ENTRY art_quick_aput_obj_with_bound_check
+    lwu  $t0, MIRROR_ARRAY_LENGTH_OFFSET($a0)
+    sltu $t1, $a1, $t0
+    bne  $t1, $zero, .Lart_quick_aput_obj_gp_set
+    nop
+    move $a0, $a1
+    b .Lart_quick_throw_array_bounds_gp_set
+    move $a1, $t0
+END art_quick_aput_obj_with_bound_check
+
+ENTRY art_quick_aput_obj
+    beq  $a2, $zero, .Ldo_aput_null
+    nop
+    lwu $t0, MIRROR_OBJECT_CLASS_OFFSET($a0)
+    lwu $t1, MIRROR_OBJECT_CLASS_OFFSET($a2)
+    lwu $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0)
+    bne $t1, $t0, .Lcheck_assignability  # value's type != array's component type: check assignability
+    nop
+.Ldo_aput:
+    dsll  $a1, $a1, 2
+    daddu $t0, $a0, $a1
+    sw   $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
+    ld   $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
+    dsrl  $t1, $a0, 7
+    daddu $t1, $t1, $t0
+    sb   $t0, ($t1)
+    jalr $zero, $ra
+    nop
+.Ldo_aput_null:
+    dsll  $a1, $a1, 2
+    daddu $t0, $a0, $a1
+    sw   $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
+    jalr $zero, $ra
+    nop
+.Lcheck_assignability:
+    daddiu $sp, $sp, -64
+    .cfi_adjust_cfa_offset 64
+    sd     $ra, 56($sp)
+    .cfi_rel_offset 31, 56
+    sd     $t9, 24($sp)
+    sd     $a2, 16($sp)
+    sd     $a1, 8($sp)
+    sd     $a0, 0($sp)
+    move   $a1, $t1
+    move   $a0, $t0
+    jal    artIsAssignableFromCode  # (Class*, Class*)
+    nop
+    ld     $ra, 56($sp)
+    ld     $t9, 24($sp)
+    ld     $a2, 16($sp)
+    ld     $a1, 8($sp)
+    ld     $a0, 0($sp)
+    daddiu $sp, $sp, 64
+    .cfi_adjust_cfa_offset -64
+    bne    $v0, $zero, .Ldo_aput
+    nop
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    move   $a1, $a2
+    dla  $t9, artThrowArrayStoreException
+    jalr $zero, $t9                 # artThrowArrayStoreException(Class*, Class*, Thread*)
+    move   $a2, rSELF               # pass Thread::Current
+END art_quick_aput_obj
+
+    /*
+     * Entry from managed code when static storage is uninitialized; this stub will run the class
+     * initializer and deliver an exception on error. On success the static storage base is
+     * returned.
+     */
+    .extern artInitializeStaticStorageFromCode
+ENTRY art_quick_initialize_static_storage
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME           # save callee saves in case of GC
+    # artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*)
+    jal     artInitializeStaticStorageFromCode
+    move    $a2, rSELF                          # pass Thread::Current
+    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_initialize_static_storage
+
+    /*
+     * Entry from managed code when dex cache misses for a type_idx.
+     */
+    .extern artInitializeTypeFromCode
+ENTRY art_quick_initialize_type
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          # save callee saves in case of GC
+    # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*)
+    jal     artInitializeTypeFromCode
+    move    $a2, rSELF                         # pass Thread::Current
+    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_initialize_type
+
+    /*
+     * Entry from managed code when type_idx needs to be checked for access and dex cache may also
+     * miss.
+     */
+    .extern artInitializeTypeAndVerifyAccessFromCode
+ENTRY art_quick_initialize_type_and_verify_access
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          # save callee saves in case of GC
+    # artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Method* referrer, Thread*)
+    jal     artInitializeTypeAndVerifyAccessFromCode
+    move    $a2, rSELF                         # pass Thread::Current
+    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_initialize_type_and_verify_access
+
+    /*
+     * Called by managed code to resolve a static field and load a boolean primitive value.
+     */
+    .extern artGetBooleanStaticFromCode
+ENTRY art_quick_get_boolean_static
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artGetBooleanStaticFromCode   # (uint32_t field_idx, const Method* referrer, Thread*)
+    move   $a2, rSELF                    # pass Thread::Current
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_boolean_static
+
+    /*
+     * Called by managed code to resolve a static field and load a byte primitive value.
+     */
+    .extern artGetByteStaticFromCode
+ENTRY art_quick_get_byte_static
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artGetByteStaticFromCode      # (uint32_t field_idx, const Method* referrer, Thread*)
+    move   $a2, rSELF                    # pass Thread::Current
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_byte_static
+
+    /*
+     * Called by managed code to resolve a static field and load a char primitive value.
+     */
+    .extern artGetCharStaticFromCode
+ENTRY art_quick_get_char_static
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artGetCharStaticFromCode      # (uint32_t field_idx, const Method* referrer, Thread*)
+    move   $a2, rSELF                    # pass Thread::Current
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_char_static
+
+    /*
+     * Called by managed code to resolve a static field and load a short primitive value.
+     */
+    .extern artGetShortStaticFromCode
+ENTRY art_quick_get_short_static
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artGetShortStaticFromCode     # (uint32_t field_idx, const Method* referrer, Thread*)
+    move   $a2, rSELF                    # pass Thread::Current
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_short_static
+
+    /*
+     * Called by managed code to resolve a static field and load a 32-bit primitive value.
+     */
+    .extern artGet32StaticFromCode
+ENTRY art_quick_get32_static
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artGet32StaticFromCode        # (uint32_t field_idx, const Method* referrer, Thread*)
+    move   $a2, rSELF                    # pass Thread::Current
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get32_static
+
+    /*
+     * Called by managed code to resolve a static field and load a 64-bit primitive value.
+     */
+    .extern artGet64StaticFromCode
+ENTRY art_quick_get64_static
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artGet64StaticFromCode        # (uint32_t field_idx, const Method* referrer, Thread*)
+    move   $a2, rSELF                    # pass Thread::Current
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get64_static
+
+    /*
+     * Called by managed code to resolve a static field and load an object reference.
+     */
+    .extern artGetObjStaticFromCode
+ENTRY art_quick_get_obj_static
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artGetObjStaticFromCode       # (uint32_t field_idx, const Method* referrer, Thread*)
+    move   $a2, rSELF                    # pass Thread::Current
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_obj_static
+
+    /*
+     * Called by managed code to resolve an instance field and load a boolean primitive value.
+     */
+    .extern artGetBooleanInstanceFromCode
+ENTRY art_quick_get_boolean_instance
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artGetBooleanInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+    move   $a3, rSELF                    # pass Thread::Current
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_boolean_instance
+
+    /*
+     * Called by managed code to resolve an instance field and load a byte primitive value.
+     */
+    .extern artGetByteInstanceFromCode
+ENTRY art_quick_get_byte_instance
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artGetByteInstanceFromCode    # (field_idx, Object*, referrer, Thread*)
+    move   $a3, rSELF                    # pass Thread::Current
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_byte_instance
+
+    /*
+     * Called by managed code to resolve an instance field and load a char primitive value.
+     */
+    .extern artGetCharInstanceFromCode
+ENTRY art_quick_get_char_instance
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artGetCharInstanceFromCode    # (field_idx, Object*, referrer, Thread*)
+    move   $a3, rSELF                    # pass Thread::Current
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_char_instance
+
+    /*
+     * Called by managed code to resolve an instance field and load a short primitive value.
+     */
+    .extern artGetShortInstanceFromCode
+ENTRY art_quick_get_short_instance
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artGetShortInstanceFromCode   # (field_idx, Object*, referrer, Thread*)
+    move   $a3, rSELF                    # pass Thread::Current
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_short_instance
+
+    /*
+     * Called by managed code to resolve an instance field and load a 32-bit primitive value.
+     */
+    .extern artGet32InstanceFromCode
+ENTRY art_quick_get32_instance
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artGet32InstanceFromCode      # (field_idx, Object*, referrer, Thread*)
+    move   $a3, rSELF                    # pass Thread::Current
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get32_instance
+
+    /*
+     * Called by managed code to resolve an instance field and load a 64-bit primitive value.
+     */
+    .extern artGet64InstanceFromCode
+ENTRY art_quick_get64_instance
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artGet64InstanceFromCode      # (field_idx, Object*, referrer, Thread*)
+    move   $a3, rSELF                    # pass Thread::Current
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get64_instance
+
+    /*
+     * Called by managed code to resolve an instance field and load an object reference.
+     */
+    .extern artGetObjInstanceFromCode
+ENTRY art_quick_get_obj_instance
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artGetObjInstanceFromCode     # (field_idx, Object*, referrer, Thread*)
+    move   $a3, rSELF                    # pass Thread::Current
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_obj_instance
+
+    /*
+     * Called by managed code to resolve a static field and store an 8-bit primitive value.
+     */
+    .extern artSet8StaticFromCode
+ENTRY art_quick_set8_static
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artSet8StaticFromCode         # (field_idx, new_val, referrer, Thread*)
+    move   $a3, rSELF                    # pass Thread::Current
+    RETURN_IF_ZERO
+END art_quick_set8_static
+
+    /*
+     * Called by managed code to resolve a static field and store a 16-bit primitive value.
+     */
+    .extern artSet16StaticFromCode
+ENTRY art_quick_set16_static
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artSet16StaticFromCode        # (field_idx, new_val, referrer, Thread*)
+    move   $a3, rSELF                    # pass Thread::Current
+    RETURN_IF_ZERO
+END art_quick_set16_static
+
+    /*
+     * Called by managed code to resolve a static field and store a 32-bit primitive value.
+     */
+    .extern artSet32StaticFromCode
+ENTRY art_quick_set32_static
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artSet32StaticFromCode        # (field_idx, new_val, referrer, Thread*)
+    move   $a3, rSELF                    # pass Thread::Current
+    RETURN_IF_ZERO
+END art_quick_set32_static
+
+    /*
+     * Called by managed code to resolve a static field and store a 64-bit primitive value.
+     */
+    .extern artSet64StaticFromCode
+ENTRY art_quick_set64_static
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    move   $a2, $a1                      # pass new_val
+    lwu    $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artSet64StaticFromCode        # (field_idx, referrer, new_val, Thread*)
+    move   $a3, rSELF                    # pass Thread::Current
+    RETURN_IF_ZERO
+END art_quick_set64_static
+
+    /*
+     * Called by managed code to resolve a static field and store an object reference.
+     */
+    .extern artSetObjStaticFromCode
+ENTRY art_quick_set_obj_static
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artSetObjStaticFromCode       # (field_idx, new_val, referrer, Thread*)
+    move   $a3, rSELF                    # pass Thread::Current
+    RETURN_IF_ZERO
+END art_quick_set_obj_static
+
+    /*
+     * Called by managed code to resolve an instance field and store an 8-bit primitive value.
+     */
+    .extern artSet8InstanceFromCode
+ENTRY art_quick_set8_instance
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artSet8InstanceFromCode       # (field_idx, Object*, new_val, referrer, Thread*)
+    move   $a4, rSELF                    # pass Thread::Current
+    RETURN_IF_ZERO
+END art_quick_set8_instance
+
+    /*
+     * Called by managed code to resolve an instance field and store a 16-bit primitive value.
+     */
+    .extern artSet16InstanceFromCode
+ENTRY art_quick_set16_instance
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artSet16InstanceFromCode      # (field_idx, Object*, new_val, referrer, Thread*)
+    move   $a4, rSELF                    # pass Thread::Current
+    RETURN_IF_ZERO
+END art_quick_set16_instance
+
+    /*
+     * Called by managed code to resolve an instance field and store a 32-bit primitive value.
+     */
+    .extern artSet32InstanceFromCode
+ENTRY art_quick_set32_instance
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artSet32InstanceFromCode      # (field_idx, Object*, new_val, referrer, Thread*)
+    move   $a4, rSELF                    # pass Thread::Current
+    RETURN_IF_ZERO
+END art_quick_set32_instance
+
+    /*
+     * Called by managed code to resolve an instance field and store a 64-bit primitive value.
+     */
+    .extern artSet64InstanceFromCode
+ENTRY art_quick_set64_instance
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artSet64InstanceFromCode      # (field_idx, Object*, new_val, referrer, Thread*)
+    move   $a4, rSELF                    # pass Thread::Current
+    RETURN_IF_ZERO
+END art_quick_set64_instance
+
+    /*
+     * Called by managed code to resolve an instance field and store an object reference.
+     */
+    .extern artSetObjInstanceFromCode
+ENTRY art_quick_set_obj_instance
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
+    lwu    $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
+    jal    artSetObjInstanceFromCode     # (field_idx, Object*, new_val, referrer, Thread*)
+    move   $a4, rSELF                    # pass Thread::Current
+    RETURN_IF_ZERO
+END art_quick_set_obj_instance
+
+    /*
+     * Entry from managed code to resolve a string; this stub will allocate a String and deliver an
+     * exception on error. On success the String is returned. $a0 holds the referring method,
+     * $a1 holds the string index. The fast-path check for a hit in the strings cache has already
+     * been performed.
+     */
+    .extern artResolveStringFromCode
+ENTRY art_quick_resolve_string
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
+    # artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*)
+    jal     artResolveStringFromCode
+    move    $a2, rSELF                 # pass Thread::Current
+    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_resolve_string
 
 // Macro to facilitate adding new allocation entrypoints.
 .macro TWO_ARG_DOWNCALL name, entrypoint, return
+    .extern \entrypoint
 ENTRY \name
-    break
-    break
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
+    jal     \entrypoint
+    move    $a2, rSELF                 # pass Thread::Current
+    \return
 END \name
 .endm
 
 .macro THREE_ARG_DOWNCALL name, entrypoint, return
+    .extern \entrypoint
 ENTRY \name
-    break
-    break
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
+    jal     \entrypoint
+    move    $a3, rSELF                 # pass Thread::Current
+    \return
 END \name
 .endm
 
 // Generate the allocation entrypoints for each allocator.
 GENERATE_ALL_ALLOC_ENTRYPOINTS
 
-UNIMPLEMENTED art_quick_test_suspend
+    /*
+     * Called by managed code when the value in rSUSPEND has been decremented to 0.
+     */
+    .extern artTestSuspendFromCode
+ENTRY art_quick_test_suspend
+    lh     $a0, THREAD_FLAGS_OFFSET(rSELF)
+    bne    $a0, $zero, 1f
+    daddiu rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL   # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
+    jalr   $zero, $ra
+    nop
+1:
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME         # save callee saves for stack crawl
+    jal    artTestSuspendFromCode             # (Thread*)
+    move   $a0, rSELF
+    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+END art_quick_test_suspend
 
     /*
      * Called by managed code that is attempting to call a method on a proxy class. On entry
@@ -779,7 +1350,19 @@
     DELIVER_PENDING_EXCEPTION
 END art_quick_proxy_invoke_handler
 
-UNIMPLEMENTED art_quick_imt_conflict_trampoline
+    /*
+     * Called to resolve an IMT conflict. $t0 is a hidden argument that holds the target method's
+     * dex method index.
+     */
+ENTRY art_quick_imt_conflict_trampoline
+    lwu     $a0, 0($sp)            # load caller Method*
+    lwu     $a0, MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET($a0)  # load dex_cache_resolved_methods
+    dsll    $t0, 2                 # convert target method offset to bytes
+    daddu   $a0, $t0               # get address of target method
+    dla     $t9, art_quick_invoke_interface_trampoline
+    jalr    $zero, $t9
+    lwu     $a0, MIRROR_OBJECT_ARRAY_DATA_OFFSET($a0)  # load the target method
+END art_quick_imt_conflict_trampoline
 
     .extern artQuickResolutionTrampoline
 ENTRY art_quick_resolution_trampoline
@@ -930,6 +1513,18 @@
     .cfi_adjust_cfa_offset -(16+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
 END art_quick_instrumentation_exit
 
-UNIMPLEMENTED art_quick_deoptimize
+    /*
+     * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
+     * will long jump to the upcall with a special exception of -1.
+     */
+    .extern artDeoptimize
+    .extern artEnterInterpreterFromDeoptimize
+ENTRY art_quick_deoptimize
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    jal      artDeoptimize     # artDeoptimize(Thread*)
+                               # Does not return: long jumps into the deoptimized frame.
+    move     $a0, rSELF        # pass Thread::Current
+END art_quick_deoptimize
+
 UNIMPLEMENTED art_quick_indexof
 UNIMPLEMENTED art_quick_string_compareto
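
The invoke trampolines and the IMT conflict stub above share one return protocol: the C++ helper reports the resolved Method* in $v0 and its entry point in $v1, and the stub either tail-jumps to the code or, on nullptr, delivers the pending exception. The same shape in C++, with hypothetical names:

    struct Method;                 // opaque; only pointers are passed around
    using EntryPoint = void (*)();

    struct InvokeResult {          // models the $v0/$v1 register pair
      Method* method;              // nullptr => exception pending
      EntryPoint code;
    };

    // Shape of the dispatch done after RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME.
    void Dispatch(InvokeResult r) {
      if (r.method == nullptr) {
        return;                    // DELIVER_PENDING_EXCEPTION path
      }
      r.code();                    // jalr $t9: jump to the resolved entry point
    }
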
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 8720f0e..2d67c8b 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_BASE_ALLOCATOR_H_
 
 #include <map>
+#include <set>
 
 #include "atomic.h"
 #include "base/macros.h"
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 19a4bd0..edd8bfe 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -100,7 +100,8 @@
   if (IsZipMagic(magic)) {
     std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd.release(), filename, error_msg));
     if (zip_archive.get() == NULL) {
-      *error_msg = StringPrintf("Failed to open zip archive '%s'", file_part);
+      *error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", file_part,
+                                error_msg->c_str());
       return false;
     }
     std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name, error_msg));
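
One subtlety in this message change: the new format string reads *error_msg while assigning to it. That is safe because StringPrintf fully materializes the combined string before operator= replaces the old one. Made explicit, with a stand-in formatter (assumed to match StringPrintf's semantics):

    #include <cstdarg>
    #include <cstdio>
    #include <string>

    std::string Format(const char* fmt, ...) {   // minimal StringPrintf stand-in
      char buf[256];
      va_list ap;
      va_start(ap, fmt);
      vsnprintf(buf, sizeof(buf), fmt, ap);
      va_end(ap);
      return buf;
    }

    void WrapError(std::string* error_msg, const char* file_part) {
      // The right-hand side (including the copy of the old message) is fully
      // evaluated before the assignment overwrites *error_msg.
      *error_msg = Format("Failed to open zip archive '%s' (error msg: %s)",
                          file_part, error_msg->c_str());
    }
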
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 00251ff..70ee042 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -154,8 +154,6 @@
   // | F7         |    f_arg7
   // | F6         |    f_arg6
   // | F5         |    f_arg5
-  // | F6         |    f_arg6
-  // | F5         |    f_arg5
   // | F4         |    f_arg4
   // | F3         |    f_arg3
   // | F2         |    f_arg2
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 057eed1..dd45eca 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -206,7 +206,7 @@
     }
     cc->is_marking_ = true;
     if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
-      CHECK(Runtime::Current()->IsCompiler());
+      CHECK(Runtime::Current()->IsAotCompiler());
       TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
       Runtime::Current()->VisitTransactionRoots(ConcurrentCopying::ProcessRootCallback, cc);
     }
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9343622..7534515 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -438,20 +438,31 @@
   // Create our garbage collectors.
   for (size_t i = 0; i < 2; ++i) {
     const bool concurrent = i != 0;
-    garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
-    garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
-    garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
+    if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
+        (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
+      garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
+      garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
+      garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
+    }
   }
   if (kMovingCollector) {
-    // TODO: Clean this up.
-    const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
-    semi_space_collector_ = new collector::SemiSpace(this, generational,
-                                                     generational ? "generational" : "");
-    garbage_collectors_.push_back(semi_space_collector_);
-    concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
-    garbage_collectors_.push_back(concurrent_copying_collector_);
-    mark_compact_collector_ = new collector::MarkCompact(this);
-    garbage_collectors_.push_back(mark_compact_collector_);
+    if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
+        MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
+        use_homogeneous_space_compaction_for_oom_) {
+      // TODO: Clean this up.
+      const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
+      semi_space_collector_ = new collector::SemiSpace(this, generational,
+                                                       generational ? "generational" : "");
+      garbage_collectors_.push_back(semi_space_collector_);
+    }
+    if (MayUseCollector(kCollectorTypeCC)) {
+      concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
+      garbage_collectors_.push_back(concurrent_copying_collector_);
+    }
+    if (MayUseCollector(kCollectorTypeMC)) {
+      mark_compact_collector_ = new collector::MarkCompact(this);
+      garbage_collectors_.push_back(mark_compact_collector_);
+    }
   }
   if (GetImageSpace() != nullptr && non_moving_space_ != nullptr &&
       (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
@@ -487,6 +498,10 @@
   return nullptr;
 }
 
+bool Heap::MayUseCollector(CollectorType type) const {
+  return foreground_collector_type_ == type || background_collector_type_ == type;
+}
+
 space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
                                                       size_t growth_limit, size_t capacity,
                                                       const char* name, bool can_move_objects) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index b2478e6..d41e17fb 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -661,6 +661,9 @@
   // Request asynchronous GC.
   void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
 
+  // Whether or not we may use a garbage collector, used so that we only create collectors we need.
+  bool MayUseCollector(CollectorType type) const;
+
  private:
   class ConcurrentGCTask;
   class CollectorTransitionTask;
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index d2e93bc..5a7b7e1 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -1008,17 +1008,26 @@
       HprofBasicType t = SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), &size);
       __ AddStringId(LookupStringId(f->GetName()));
       __ AddU1(t);
-      switch (size) {
-        case 1:
-          __ AddU1(static_cast<uint8_t>(f->Get32(klass)));
+      switch (t) {
+        case hprof_basic_byte:
+          __ AddU1(f->GetByte(klass));
           break;
-        case 2:
-          __ AddU2(static_cast<uint16_t>(f->Get32(klass)));
+        case hprof_basic_boolean:
+          __ AddU1(f->GetBoolean(klass));
           break;
-        case 4:
+        case hprof_basic_char:
+          __ AddU2(f->GetChar(klass));
+          break;
+        case hprof_basic_short:
+          __ AddU2(f->GetShort(klass));
+          break;
+        case hprof_basic_float:
+        case hprof_basic_int:
+        case hprof_basic_object:
           __ AddU4(f->Get32(klass));
           break;
-        case 8:
+        case hprof_basic_double:
+        case hprof_basic_long:
           __ AddU8(f->Get64(klass));
           break;
         default:
@@ -1099,16 +1108,29 @@
     for (int i = 0; i < ifieldCount; ++i) {
       mirror::ArtField* f = klass->GetInstanceField(i);
       size_t size;
-      SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), &size);
-      if (size == 1) {
-        __ AddU1(f->Get32(obj));
-      } else if (size == 2) {
-        __ AddU2(f->Get32(obj));
-      } else if (size == 4) {
+      auto t = SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), &size);
+      switch (t) {
+      case hprof_basic_byte:
+        __ AddU1(f->GetByte(obj));
+        break;
+      case hprof_basic_boolean:
+        __ AddU1(f->GetBoolean(obj));
+        break;
+      case hprof_basic_char:
+        __ AddU2(f->GetChar(obj));
+        break;
+      case hprof_basic_short:
+        __ AddU2(f->GetShort(obj));
+        break;
+      case hprof_basic_float:
+      case hprof_basic_int:
+      case hprof_basic_object:
         __ AddU4(f->Get32(obj));
-      } else {
-        CHECK_EQ(size, 8U);
+        break;
+      case hprof_basic_double:
+      case hprof_basic_long:
         __ AddU8(f->Get64(obj));
+        break;
       }
     }
 
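The hprof rewrite replaces size-based dispatch with type-based dispatch because width alone is ambiguous: byte and boolean are both one byte, char and short both two, and float/int/object all four, yet each needs the matching typed accessor. The collisions show up in the HPROF basic-type widths (tag values per the HPROF format; 4-byte object IDs assumed, as in the hunk):

    #include <cstddef>

    enum HprofBasicType {
      hprof_basic_object = 2, hprof_basic_boolean = 4, hprof_basic_char = 5,
      hprof_basic_float = 6, hprof_basic_double = 7, hprof_basic_byte = 8,
      hprof_basic_short = 9, hprof_basic_int = 10, hprof_basic_long = 11,
    };

    // Width in bytes of each value record; note the 1-, 2- and 4-byte
    // collisions that made size-only dispatch lose the boolean/byte and
    // char/short distinctions.
    size_t HprofBasicTypeSize(HprofBasicType t) {
      switch (t) {
        case hprof_basic_boolean: case hprof_basic_byte: return 1;
        case hprof_basic_char: case hprof_basic_short: return 2;
        case hprof_basic_float: case hprof_basic_int: case hprof_basic_object: return 4;
        case hprof_basic_double: case hprof_basic_long: return 8;
      }
      return 0;
    }
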
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 3f09bd3..e6e647c 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -837,7 +837,7 @@
   } else if (name == "java.lang.Class java.lang.Void.lookupType()") {
     result->SetL(Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V'));
   } else if (name == "java.lang.Object java.lang.Class.newInstance()") {
-    StackHandleScope<2> hs(self);
+    StackHandleScope<3> hs(self);  // Class, constructor, object.
     Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
     Handle<Class> h_klass(hs.NewHandle(klass));
     // There are two situations in which we'll abort this run.
@@ -846,13 +846,15 @@
     // Note that 2) could likely be handled here, but for safety abort the transaction.
     bool ok = false;
     if (Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
-      ArtMethod* c = h_klass->FindDeclaredDirectMethod("<init>", "()V");
-      if (c != nullptr) {
-        Handle<Object> obj(hs.NewHandle(klass->AllocObject(self)));
-        CHECK(obj.Get() != nullptr);  // We don't expect OOM at compile-time.
-        EnterInterpreterFromInvoke(self, c, obj.Get(), nullptr, nullptr);
-        result->SetL(obj.Get());
-        ok = true;
+      Handle<ArtMethod> h_cons(hs.NewHandle(h_klass->FindDeclaredDirectMethod("<init>", "()V")));
+      if (h_cons.Get() != nullptr) {
+        Handle<Object> h_obj(hs.NewHandle(klass->AllocObject(self)));
+        CHECK(h_obj.Get() != nullptr);  // We don't expect OOM at compile-time.
+        EnterInterpreterFromInvoke(self, h_cons.Get(), h_obj.Get(), nullptr, nullptr);
+        if (!self->IsExceptionPending()) {
+          result->SetL(h_obj.Get());
+          ok = true;
+        }
       } else {
         self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
                                  "Could not find default constructor for '%s'",
@@ -890,8 +892,8 @@
       }
     }
     CHECK(found != NULL)
-      << "Failed to find field in Class.getDeclaredField in un-started runtime. name="
-      << name2->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass);
+        << "Failed to find field in Class.getDeclaredField in un-started runtime. name="
+        << name2->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass);
     // TODO: getDeclaredField calls GetType once the field is found to ensure a
     //       NoClassDefFoundError is thrown if the field's type cannot be resolved.
     Class* jlr_Field = self->DecodeJObject(WellKnownClasses::java_lang_reflect_Field)->AsClass();
@@ -910,7 +912,8 @@
     mirror::ArtMethod* method = shadow_frame->GetVRegReference(arg_offset)->AsArtMethod();
     result->SetL(method->GetNameAsString(self));
   } else if (name == "void java.lang.System.arraycopy(java.lang.Object, int, java.lang.Object, int, int)" ||
-             name == "void java.lang.System.arraycopy(char[], int, char[], int, int)") {
+             name == "void java.lang.System.arraycopy(char[], int, char[], int, int)" ||
+             name == "void java.lang.System.arraycopy(int[], int, int[], int, int)") {
     // Special case array copying without initializing System.
     Class* ctype = shadow_frame->GetVRegReference(arg_offset)->GetClass()->GetComponentType();
     jint srcPos = shadow_frame->GetVReg(arg_offset + 1);
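
The unstarted-runtime arraycopy shortcut dispatches on the source array's component type; the new signature string simply admits int[] beside the existing char[] path. A hedged sketch of that dispatch shape (toy types; the real code goes through mirror::Array element accessors and aborts the transaction on unsupported types):

    #include <cstdint>
    #include <cstring>

    enum class Component { kChar, kInt, kOther };

    // Returns true if the copy was handled without initializing System.
    bool UnstartedArraycopy(Component type, void* dst, const void* src, int count) {
      switch (type) {
        case Component::kChar:  // Pre-existing fast path; Java char is 16-bit.
          std::memmove(dst, src, static_cast<size_t>(count) * sizeof(std::uint16_t));
          return true;
        case Component::kInt:   // The newly admitted signature.
          std::memmove(dst, src, static_cast<size_t>(count) * sizeof(std::int32_t));
          return true;
        default:
          return false;  // Unsupported component type: abort the transaction.
      }
    }
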
@@ -972,22 +975,24 @@
         std::string caller2(PrettyMethod(shadow_frame->GetLink()->GetLink()->GetMethod()));
         if (caller2 == "java.lang.String java.lang.Double.toString(double)") {
           // Allocate new object.
-          mirror::Class* real_to_string_class =
-              shadow_frame->GetLink()->GetMethod()->GetDeclaringClass();
-          mirror::Object* real_to_string_obj = real_to_string_class->AllocObject(self);
-          if (real_to_string_obj != nullptr) {
+          StackHandleScope<2> hs(self);
+          Handle<Class> h_real_to_string_class(hs.NewHandle(
+              shadow_frame->GetLink()->GetMethod()->GetDeclaringClass()));
+          Handle<Object> h_real_to_string_obj(hs.NewHandle(
+              h_real_to_string_class->AllocObject(self)));
+          if (h_real_to_string_obj.Get() != nullptr) {
             mirror::ArtMethod* init_method =
-                real_to_string_class->FindDirectMethod("<init>", "()V");
+                h_real_to_string_class->FindDirectMethod("<init>", "()V");
             if (init_method == nullptr) {
-              real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail);
-            }
-            JValue invoke_result;
-            // One arg, this.
-            uint32_t args = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(real_to_string_obj));
-            init_method->Invoke(self, &args, 4, &invoke_result, init_method->GetShorty());
-            if (!self->IsExceptionPending()) {
-              result->SetL(real_to_string_obj);
-              ok = true;
+              h_real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail);
+            } else {
+              JValue invoke_result;
+              EnterInterpreterFromInvoke(self, init_method, h_real_to_string_obj.Get(), nullptr,
+                                         nullptr);
+              if (!self->IsExceptionPending()) {
+                result->SetL(h_real_to_string_obj.Get());
+                ok = true;
+              }
             }
           }
 
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index b6fedd9..3d69796 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -408,7 +408,7 @@
 void JdwpState::Run() {
   Runtime* runtime = Runtime::Current();
   CHECK(runtime->AttachCurrentThread("JDWP", true, runtime->GetSystemThreadGroup(),
-                                     !runtime->IsCompiler()));
+                                     !runtime->IsAotCompiler()));
 
   VLOG(jdwp) << "JDWP: thread running";
 
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 9d87ed7..607569a 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -146,8 +146,9 @@
       .Define({"-XX:EnableHSpaceCompactForOOM", "-XX:DisableHSpaceCompactForOOM"})
           .WithValues({true, false})
           .IntoKey(M::EnableHSpaceCompactForOOM)
-      .Define({"-Xjit", "-Xnojit"})
-          .WithValues({true, false})
+      .Define("-Xusejit:_")
+          .WithType<bool>()
+          .WithValueMap({{"false", false}, {"true", true}})
           .IntoKey(M::UseJIT)
       .Define("-Xjitcodecachesize:_")
           .WithType<MemoryKiB>()
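
The replacement option is a key:value flag rather than a +/- pair: the trailing "_" in "-Xusejit:_" is the parser's wildcard for the text after the prefix, and WithValueMap translates that text into a bool. A toy equivalent of what a single parse amounts to (illustrative only; ART's CmdlineParser is far more general):

    #include <optional>
    #include <string>

    std::optional<bool> ParseUseJit(const std::string& arg) {
      static const char kPrefix[] = "-Xusejit:";
      if (arg.compare(0, sizeof(kPrefix) - 1, kPrefix) != 0) {
        return std::nullopt;               // Not this option at all.
      }
      const std::string value = arg.substr(sizeof(kPrefix) - 1);
      if (value == "true") return true;    // {"true", true} in the value map.
      if (value == "false") return false;  // {"false", false}.
      return std::nullopt;                 // Anything else is a parse error.
    }
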
@@ -642,8 +643,7 @@
   UsageMessage(stream, "  -Xcompiler-option dex2oat-option\n");
   UsageMessage(stream, "  -Ximage-compiler-option dex2oat-option\n");
   UsageMessage(stream, "  -Xpatchoat:filename\n");
-  UsageMessage(stream, "  -Xjit\n");
-  UsageMessage(stream, "  -Xnojit\n");
+  UsageMessage(stream, "  -Xusejit:booleanvalue\n");
   UsageMessage(stream, "  -X[no]relocate\n");
   UsageMessage(stream, "  -X[no]dex2oat (Whether to invoke dex2oat on the application)\n");
   UsageMessage(stream, "  -X[no]image-dex2oat (Whether to create and use a boot image)\n");
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 2043672..70de0db 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -843,7 +843,7 @@
     Dbg::ConfigureJdwp(runtime_options.GetOrDefault(Opt::JdwpOptions));
   }
 
-  if (!IsCompiler()) {
+  if (!IsAotCompiler()) {
     // If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
     // this case.
     // If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
@@ -1548,14 +1548,14 @@
   if (!IsActiveTransaction()) {
     return false;
   } else {
-    DCHECK(IsCompiler());
+    DCHECK(IsAotCompiler());
     return preinitialization_transaction_->IsAborted();
   }
 }
 
 void Runtime::AbortTransactionAndThrowInternalError(Thread* self,
                                                     const std::string& abort_message) {
-  DCHECK(IsCompiler());
+  DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   // Throwing an exception may cause its class initialization. If we mark the transaction
   // aborted before that, we may warn with a false alarm. Throwing the exception before
@@ -1565,35 +1565,35 @@
 }
 
 void Runtime::ThrowInternalErrorForAbortedTransaction(Thread* self) {
-  DCHECK(IsCompiler());
+  DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->ThrowInternalError(self, true);
 }
 
 void Runtime::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
                                       uint8_t value, bool is_volatile) const {
-  DCHECK(IsCompiler());
+  DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->RecordWriteFieldBoolean(obj, field_offset, value, is_volatile);
 }
 
 void Runtime::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
                                    int8_t value, bool is_volatile) const {
-  DCHECK(IsCompiler());
+  DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->RecordWriteFieldByte(obj, field_offset, value, is_volatile);
 }
 
 void Runtime::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
                                    uint16_t value, bool is_volatile) const {
-  DCHECK(IsCompiler());
+  DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->RecordWriteFieldChar(obj, field_offset, value, is_volatile);
 }
 
 void Runtime::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
                                     int16_t value, bool is_volatile) const {
-  DCHECK(IsCompiler());
+  DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->RecordWriteFieldShort(obj, field_offset, value, is_volatile);
 }
@@ -1671,6 +1671,10 @@
   std::string feature_string("--instruction-set-features=");
   feature_string += features->GetFeatureString();
   argv->push_back(feature_string);
+
+  if (Dbg::IsJdwpConfigured()) {
+    argv->push_back("--debuggable");
+  }
 }
 
 void Runtime::UpdateProfilerState(int state) {
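
The last hunk above teaches the runtime to forward --debuggable to dex2oat whenever JDWP is configured, alongside the instruction-set feature string it already passes, so on-device compilation produces debugger-friendly code. Reduced to a hedged sketch (hypothetical free function; the real logic is a Runtime member):

    #include <string>
    #include <vector>

    void AddDex2OatArgs(bool jdwp_configured, const std::string& features,
                        std::vector<std::string>* argv) {
      argv->push_back("--instruction-set-features=" + features);
      if (jdwp_configured) {
        argv->push_back("--debuggable");  // Compile so a debugger can attach.
      }
    }
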
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index ec37699..6d99672 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -146,13 +146,14 @@
     return sizeof(LocationKind) + sizeof(int32_t);
   }
 
- private:
+  size_t Size() const {
+    return region_.size();
+  }
+
   static constexpr int kFixedSize = 0;
 
+ private:
   MemoryRegion region_;
-
-  friend class CodeInfo;
-  friend class StackMapStream;
 };
 
 /**
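
With Size() and kFixedSize public and the friend declarations gone, CodeInfo and StackMapStream now consume the same surface as any other caller. In toy form, the accessor simply reports the backing region's size (stand-in types, not ART's definitions):

    #include <cstddef>
    #include <cstdint>

    class MemoryRegion {
     public:
      MemoryRegion(uint8_t* begin, size_t size) : begin_(begin), size_(size) {}
      size_t size() const { return size_; }
     private:
      uint8_t* begin_;
      size_t size_;
    };

    class RegionBackedMap {
     public:
      explicit RegionBackedMap(MemoryRegion region) : region_(region) {}
      size_t Size() const { return region_.size(); }  // Now public in the hunk.
      static constexpr int kFixedSize = 0;
     private:
      MemoryRegion region_;
    };
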
diff --git a/test/031-class-attributes/expected.txt b/test/031-class-attributes/expected.txt
index 4ae1eed..de99872 100644
--- a/test/031-class-attributes/expected.txt
+++ b/test/031-class-attributes/expected.txt
@@ -33,8 +33,8 @@
   enclosingMeth: null
   modifiers: 1
   package: null
-  declaredClasses: [10] class ClassAttrs$PublicMemberClass, class ClassAttrs$MemberClass, interface ClassAttrs$PackagePrivateInnerInterface, interface ClassAttrs$PrivateInnerInterface, interface ClassAttrs$ProtectedInnerInterface, interface ClassAttrs$PublicInnerInterface, class ClassAttrs$PackagePrivateInnerClass, class ClassAttrs$PrivateInnerClass, class ClassAttrs$ProtectedInnerClass, class ClassAttrs$PublicInnerClass
-  member classes: [3] class ClassAttrs$PublicMemberClass, interface ClassAttrs$PublicInnerInterface, class ClassAttrs$PublicInnerClass
+  declaredClasses: [10] class ClassAttrs$MemberClass, class ClassAttrs$PackagePrivateInnerClass, class ClassAttrs$PrivateInnerClass, class ClassAttrs$ProtectedInnerClass, class ClassAttrs$PublicInnerClass, class ClassAttrs$PublicMemberClass, interface ClassAttrs$PackagePrivateInnerInterface, interface ClassAttrs$PrivateInnerInterface, interface ClassAttrs$ProtectedInnerInterface, interface ClassAttrs$PublicInnerInterface
+  member classes: [3] class ClassAttrs$PublicInnerClass, class ClassAttrs$PublicMemberClass, interface ClassAttrs$PublicInnerInterface
   isAnnotation: false
   isAnonymous: false
   isArray: false
diff --git a/test/031-class-attributes/src/ClassAttrs.java b/test/031-class-attributes/src/ClassAttrs.java
index ae8b2f5..c2e41c5 100644
--- a/test/031-class-attributes/src/ClassAttrs.java
+++ b/test/031-class-attributes/src/ClassAttrs.java
@@ -9,6 +9,9 @@
 import java.lang.reflect.Modifier;
 import java.lang.reflect.Type;
 import java.lang.reflect.TypeVariable;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
 
 public class ClassAttrs {
     ClassAttrs() {
@@ -323,19 +326,28 @@
      * Convert an array of Type into a string.  Start with an array count.
      */
     private static String stringifyTypeArray(Type[] types) {
+        List<String> typeStringList = new ArrayList<String>();
+        for (Type t : types) {
+          typeStringList.add(t.toString());
+        }
+        // Sort types alphabetically so they're always printed in the same order.
+        // For instance, Class.getClasses() does not guarantee any order for the
+        // returned Class[].
+        Collections.sort(typeStringList);
+
         StringBuilder stb = new StringBuilder();
         boolean first = true;
 
         stb.append("[" + types.length + "]");
 
-        for (Type t: types) {
+        for (String typeString : typeStringList) {
             if (first) {
                 stb.append(" ");
                 first = false;
             } else {
                 stb.append(", ");
             }
-            stb.append(t.toString());
+            stb.append(typeString);
         }
 
         return stb.toString();
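
The re-ordered expected.txt lines above follow directly from this sort: once the stringified names are sorted, the join order is lexicographic regardless of the order reflection returned. A C++ rendering of the same routine, demonstrated on the member-classes example:

    #include <algorithm>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    std::string StringifyTypeArray(std::vector<std::string> types) {
      std::sort(types.begin(), types.end());  // Deterministic ordering.
      std::ostringstream out;
      out << "[" << types.size() << "]";
      for (size_t i = 0; i < types.size(); ++i) {
        out << (i == 0 ? " " : ", ") << types[i];
      }
      return out.str();
    }

    int main() {
      // Reflection may hand these back in any order; output is stable anyway.
      std::cout << StringifyTypeArray({"class ClassAttrs$PublicMemberClass",
                                       "class ClassAttrs$PublicInnerClass",
                                       "interface ClassAttrs$PublicInnerInterface"})
                << "\n";
      // Prints: [3] class ClassAttrs$PublicInnerClass, class ClassAttrs$PublicMemberClass, interface ClassAttrs$PublicInnerInterface
      return 0;
    }
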
diff --git a/test/456-baseline-array-set/expected.txt b/test/456-baseline-array-set/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/456-baseline-array-set/expected.txt
diff --git a/test/456-baseline-array-set/info.txt b/test/456-baseline-array-set/info.txt
new file mode 100644
index 0000000..cf4137a
--- /dev/null
+++ b/test/456-baseline-array-set/info.txt
@@ -0,0 +1,3 @@
+Regression test for the optimizing compiler on x86, where
+we could run out of available registers when using the
+baseline register allocator.
diff --git a/test/456-baseline-array-set/src/Main.java b/test/456-baseline-array-set/src/Main.java
new file mode 100644
index 0000000..5475b41
--- /dev/null
+++ b/test/456-baseline-array-set/src/Main.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) {
+    doArrayAccess(new Integer(1), 0);
+  }
+
+  public static void doArrayAccess(Integer value, int index) {
+    try {
+      Integer[] array = new Integer[2];
+      // With the baseline register allocator on x86, generating code
+      // for this array set could run out of available registers.
+      array[index] = array[index + 1];
+      array[index] = value;
+    } catch (ArrayStoreException e) {
+      throw e;
+    }
+  }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 10c422e..da36056 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -130,6 +130,10 @@
 ifeq ($(ART_TEST_RUN_TEST_NDEBUG),true)
   RUN_TYPES += ndebug
 endif
+DEBUGGABLE_TYPES := nondebuggable
+ifeq ($(ART_TEST_RUN_TEST_DEBUGGABLE),true)
+  DEBUGGABLE_TYPES += debuggable
+endif
 ADDRESS_SIZES_TARGET := $(ART_PHONY_TEST_TARGET_SUFFIX)
 ADDRESS_SIZES_HOST := $(ART_PHONY_TEST_HOST_SUFFIX)
 ifeq ($(ART_TEST_RUN_TEST_2ND_ARCH),true)
@@ -150,16 +154,17 @@
                 $(foreach jni, $(8), \
                   $(foreach image, $(9), \
                     $(foreach pictest, $(10), \
-                      $(foreach test, $(11), \
-                        $(foreach address_size, $(12), \
-                          test-art-$(target)-run-test-$(run-type)-$(prebuild)-$(compiler)-$(relocate)-$(trace)-$(gc)-$(jni)-$(image)-$(pictest)-$(test)$(address_size) \
-                    ))))))))))))
+                      $(foreach debuggable, $(11), \
+                        $(foreach test, $(12), \
+                          $(foreach address_size, $(13), \
+                            test-art-$(target)-run-test-$(run-type)-$(prebuild)-$(compiler)-$(relocate)-$(trace)-$(gc)-$(jni)-$(image)-$(pictest)-$(debuggable)-$(test)$(address_size) \
+                    )))))))))))))
 endef  # all-run-test-names
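
The define above is now a thirteen-level foreach: each generated rule name is one element of the cartesian product of the test dimensions, with debuggable/nondebuggable newly threaded through as $(11). The same computation in imperative form (toy dimension values, for illustration only):

    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      // A few representative dimensions; the makefile threads thirteen.
      const std::vector<std::vector<std::string>> dims = {
          {"host", "target"},               // $(1)  target types
          {"debug", "ndebug"},              // $(2)  run types
          {"nondebuggable", "debuggable"},  // $(11) the new axis
          {"001-HelloWorld"},               // $(12) test name
          {"32", "64"},                     // $(13) address size
      };
      std::vector<std::string> names = {""};
      for (const auto& dim : dims) {
        std::vector<std::string> next;
        for (const auto& prefix : names) {
          for (const auto& value : dim) {
            next.push_back(prefix.empty() ? value : prefix + "-" + value);
          }
        }
        names.swap(next);
      }
      for (const auto& name : names) {
        std::cout << "test-art-run-test-" << name << "\n";  // 16 rules here.
      }
      return 0;
    }
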
 
 # To generate a full list of tests:
 # $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES),$(COMPILER_TYPES), \
 #        $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
-#        $(TEST_ART_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+#        $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_RUN_TESTS), $(ALL_ADDRESS_SIZES))
 
 # Converts a rule name to the form used in variables, e.g. no-relocate to NO_RELOCATE
 define name-to-var
@@ -176,7 +181,7 @@
 ifdef dist_goal
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
         $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
-        $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+        $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
 endif
 
 TEST_ART_TIMING_SENSITIVE_RUN_TESTS :=
@@ -190,7 +195,7 @@
 ifneq (,$(filter prebuild,$(PREBUILD_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),prebuild, \
       $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
-      $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_PREBUILD_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+      $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_PREBUILD_RUN_TESTS), $(ALL_ADDRESS_SIZES))
 endif
 
 TEST_ART_BROKEN_PREBUILD_RUN_TESTS :=
@@ -201,7 +206,7 @@
 ifneq (,$(filter no-prebuild,$(PREBUILD_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),no-prebuild, \
       $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
-      $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(ALL_ADDRESS_SIZES))
+      $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(ALL_ADDRESS_SIZES))
 endif
 
 TEST_ART_BROKEN_NO_PREBUILD_TESTS :=
@@ -216,7 +221,7 @@
 ifneq (,$(filter no-relocate,$(RELOCATE_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
       $(COMPILER_TYPES), no-relocate,$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
-      $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_NO_RELOCATE_TESTS), $(ALL_ADDRESS_SIZES))
+      $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_NO_RELOCATE_TESTS), $(ALL_ADDRESS_SIZES))
 endif
 
 TEST_ART_BROKEN_NO_RELOCATE_TESTS :=
@@ -227,14 +232,14 @@
 ifneq (,$(filter gcstress,$(GC_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
       $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),gcstress,$(JNI_TYPES), \
-      $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+      $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(ALL_ADDRESS_SIZES))
 endif
 
 TEST_ART_BROKEN_GCSTRESS_RUN_TESTS :=
 
 # 115-native-bridge setup is complicated. Need to implement it correctly for the target.
 ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES),$(COMPILER_TYPES), \
-    $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),115-native-bridge, \
+    $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),115-native-bridge, \
     $(ALL_ADDRESS_SIZES))
 
 # 130-hprof dumps the heap and runs hprof-conv to check whether the file is somewhat readable. This
@@ -243,7 +248,7 @@
 #       very hard to write here, as (for a complete test) JDWP must be set up.
 ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
     $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
-    $(PICTEST_TYPES),130-hprof,$(ALL_ADDRESS_SIZES))
+    $(PICTEST_TYPES),$(DEBUGGABLE_TYPES),130-hprof,$(ALL_ADDRESS_SIZES))
 
 # All these tests check that we have sane behavior if we don't have a patchoat or dex2oat.
 # Therefore we shouldn't run them in situations where we actually don't have these since they
@@ -257,20 +262,20 @@
 ifneq (,$(filter no-dex2oat,$(PREBUILD_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),no-dex2oat, \
       $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
-      $(PICTEST_TYPES),$(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+      $(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
 endif
 
 
 ifneq (,$(filter no-image,$(IMAGE_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
       $(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),no-image, \
-      $(PICTEST_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+      $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
 endif
 
 ifneq (,$(filter relocate-no-patchoat,$(RELOCATE_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
       $(COMPILER_TYPES), relocate-no-patchoat,$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
-      $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+      $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
 endif
 
 TEST_ART_BROKEN_FALLBACK_RUN_TESTS :=
@@ -283,7 +288,7 @@
 ifneq (,$(filter trace,$(TRACE_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
       $(COMPILER_TYPES),$(RELOCATE_TYPES),trace,$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
-      $(PICTEST_TYPES),$(TEST_ART_BROKEN_TRACING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+      $(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_TRACING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
 endif
 
 TEST_ART_BROKEN_TRACING_RUN_TESTS :=
@@ -310,7 +315,7 @@
 ifneq (,$(filter ndebug,$(RUN_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),ndebug,$(PREBUILD_TYPES), \
       $(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
-      $(PICTEST_TYPES),$(TEST_ART_BROKEN_NDEBUG_TESTS),$(ALL_ADDRESS_SIZES))
+      $(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_NDEBUG_TESTS),$(ALL_ADDRESS_SIZES))
 endif
 
 TEST_ART_BROKEN_NDEBUG_TESTS :=
@@ -321,7 +326,7 @@
 ifneq (,$(filter default,$(COMPILER_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
       default,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
-      $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_DEFAULT_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+      $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_DEFAULT_RUN_TESTS),$(ALL_ADDRESS_SIZES))
 endif
 
 TEST_ART_BROKEN_DEFAULT_RUN_TESTS :=
@@ -335,7 +340,7 @@
 ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
       optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
-      $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_32_RUN_TESTS),32)
+      $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_32_RUN_TESTS),32)
 endif
 
 TEST_ART_BROKEN_OPTIMIZING_32_RUN_TESTS :=
@@ -346,7 +351,7 @@
 ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
       optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
-      $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS),64)
+      $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS),64)
 endif
 
 TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS :=
@@ -359,7 +364,7 @@
 ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
       optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
-      $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+      $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
 endif
 
 # If ART_USE_OPTIMIZING_COMPILER is set to true, then the default core.art has been
@@ -367,7 +372,7 @@
 ifeq ($(ART_USE_OPTIMIZING_COMPILER),true)
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
       default,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
-      $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+      $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
 endif
 
 TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS :=
@@ -405,6 +410,9 @@
 $(foreach target, $(TARGET_TYPES), \
   $(foreach run_type, $(RUN_TYPES), \
     $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(run_type))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach debuggable_type, $(DEBUGGABLE_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(debuggable_type))_RULES :=)))
 
 # We need dex2oat and dalvikvm on the target as well as the core images (all images as we sync
 # only once).
@@ -618,19 +626,30 @@
       $$(error found $(10) expected $(PICTEST_TYPES))
     endif
   endif
-  # $(11) is the test name
-  test_groups += ART_RUN_TEST_$$(uc_host_or_target)_$(call name-to-var,$(11))_RULES
-  ifeq ($(12),64)
+  ifeq ($(11),debuggable)
+    test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEBUGGABLE_RULES
+    run_test_options += --debuggable
+  else
+    ifeq ($(11),nondebuggable)
+      test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NONDEBUGGABLE_RULES
+      # No extra run-test options are needed for nondebuggable.
+    else
+      $$(error found $(11) expected $(DEBUGGABLE_TYPES))
+    endif
+  endif
+  # $(12) is the test name
+  test_groups += ART_RUN_TEST_$$(uc_host_or_target)_$(call name-to-var,$(12))_RULES
+  ifeq ($(13),64)
     test_groups += ART_RUN_TEST_$$(uc_host_or_target)_64_RULES
     run_test_options += --64
   else
-    ifeq ($(12),32)
+    ifeq ($(13),32)
       test_groups += ART_RUN_TEST_$$(uc_host_or_target)_32_RULES
     else
-      $$(error found $(12) expected $(ALL_ADDRESS_SIZES))
+      $$(error found $(13) expected $(ALL_ADDRESS_SIZES))
     endif
   endif
-  run_test_rule_name := test-art-$(1)-run-test-$(2)-$(3)-$(4)-$(5)-$(6)-$(7)-$(8)-$(9)-$(10)-$(11)$(12)
+  run_test_rule_name := test-art-$(1)-run-test-$(2)-$(3)-$(4)-$(5)-$(6)-$(7)-$(8)-$(9)-$(10)-$(11)-$(12)$(13)
   run_test_options := --output-path $(ART_HOST_TEST_DIR)/run-test-output/$$(run_test_rule_name) \
       $$(run_test_options)
   ifneq ($(ART_TEST_ANDROID_ROOT),)
@@ -643,7 +662,7 @@
 	  DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
 	    SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
 	    DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \
-	    art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(11) \
+	    art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(12) \
 	      && $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@)
 	$$(hide) (echo $(MAKECMDGOALS) | grep -q $$@ && \
 	  echo "run-test run as top-level target, removing test directory $(ART_HOST_TEST_DIR)" && \
@@ -671,8 +690,9 @@
                   $(foreach jni, $(JNI_TYPES), \
                     $(foreach image, $(IMAGE_TYPES), \
                       $(foreach pictest, $(PICTEST_TYPES), \
-                        $(eval $(call define-test-art-run-test,$(target),$(run_type),$(prebuild),$(compiler),$(relocate),$(trace),$(gc),$(jni),$(image),$(pictest),$(test),$(address_size))) \
-                  ))))))))))))
+                        $(foreach debuggable, $(DEBUGGABLE_TYPES), \
+                          $(eval $(call define-test-art-run-test,$(target),$(run_type),$(prebuild),$(compiler),$(relocate),$(trace),$(gc),$(jni),$(image),$(pictest),$(debuggable),$(test),$(address_size))) \
+                  )))))))))))))
 define-test-art-run-test :=
 
 # Define a phony rule whose purpose is to test its prerequisites.
@@ -710,6 +730,9 @@
   $(foreach jni, $(JNI_TYPES), $(eval \
     $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(jni),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(jni))_RULES)))))
 $(foreach target, $(TARGET_TYPES), \
+  $(foreach debuggable, $(DEBUGGABLE_TYPES), $(eval \
+    $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(debuggable),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(debuggable))_RULES)))))
+$(foreach target, $(TARGET_TYPES), \
   $(foreach image, $(IMAGE_TYPES), $(eval \
     $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(image),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(image))_RULES)))))
 $(foreach target, $(TARGET_TYPES), \
@@ -740,6 +763,9 @@
   $(foreach jni, $(JNI_TYPES), \
     $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(jni))_RULES :=)))
 $(foreach target, $(TARGET_TYPES), \
+  $(foreach debuggable, $(DEBUGGABLE_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(debuggable))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
   $(foreach image, $(IMAGE_TYPES), \
     $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(image))_RULES :=)))
 $(foreach target, $(TARGET_TYPES), \
@@ -764,6 +790,7 @@
 ADDRESS_SIZES_HOST :=
 ALL_ADDRESS_SIZES :=
 RUN_TYPES :=
+DEBUGGABLE_TYPES :=
 
 include $(LOCAL_PATH)/Android.libarttest.mk
 include art/test/Android.libnativebridgetest.mk
diff --git a/test/run-test b/test/run-test
index 52f5e0c..2950af1 100755
--- a/test/run-test
+++ b/test/run-test
@@ -147,6 +147,9 @@
         run_args="${run_args} --prebuild"
         prebuild_mode="yes"
         shift;
+    elif [ "x$1" = "x--debuggable" ]; then
+        run_args="${run_args} -Xcompiler-option --debuggable"
+        shift;
     elif [ "x$1" = "x--no-prebuild" ]; then
         run_args="${run_args} --no-prebuild"
         prebuild_mode="no"
@@ -431,6 +434,7 @@
         echo "    -Xcompiler-option     Pass an option to the compiler."
         echo "    --runtime-option      Pass an option to the runtime."
         echo "    --debug               Wait for a debugger to attach."
+        echo "    --debuggable          Whether to compile Java code for a debugger."
         echo "    --gdb                 Run under gdb; incompatible with some tests."
         echo "    --build-only          Build test files only (off by default)."
         echo "    --interpreter         Enable interpreter only mode (off by default)."
@@ -464,7 +468,7 @@
         echo "    --gcverify            Run with gc verification"
         echo "    --always-clean        Delete the test files even if the test fails."
         echo "    --android-root [path] The path on target for the android root. (/system by default)."
-        echo "    --dex2oat-swap       Use a dex2oat swap file."
+        echo "    --dex2oat-swap        Use a dex2oat swap file."
     ) 1>&2
     exit 1
 fi