Merge "X86: Assembler: Correct r8_form for some cases"
diff --git a/Android.mk b/Android.mk
index c5e90f2..b006bfe 100644
--- a/Android.mk
+++ b/Android.mk
@@ -316,7 +316,8 @@
 		--dex-location=/$(1) --oat-file=$$@ \
 		--instruction-set=$(DEX2OAT_TARGET_ARCH) \
 		--instruction-set-features=$(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
-		--android-root=$(PRODUCT_OUT)/system --include-patch-information
+		--android-root=$(PRODUCT_OUT)/system --include-patch-information \
+		--runtime-arg -Xnorelocate
 
 endif
 
@@ -389,8 +390,8 @@
 	adb root && sleep 3
 	adb shell stop
 	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-flags ""
-	adb shell setprop dalvik.vm.image-dex2oat-flags ""
+	adb shell setprop dalvik.vm.dex2oat-filter ""
+	adb shell setprop dalvik.vm.image-dex2oat-filter ""
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
 	adb shell start
 
@@ -399,8 +400,8 @@
 	adb root && sleep 3
 	adb shell stop
 	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-flags ""
-	adb shell setprop dalvik.vm.image-dex2oat-flags ""
+	adb shell setprop dalvik.vm.dex2oat-filter ""
+	adb shell setprop dalvik.vm.image-dex2oat-filter ""
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
 	adb shell start
 
@@ -409,8 +410,8 @@
 	adb root && sleep 3
 	adb shell stop
 	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=interpret-only"
-	adb shell setprop dalvik.vm.image-dex2oat-flags ""
+	adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
+	adb shell setprop dalvik.vm.image-dex2oat-filter ""
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
 	adb shell start
 
@@ -419,8 +420,8 @@
 	adb root && sleep 3
 	adb shell stop
 	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=interpret-only"
-	adb shell setprop dalvik.vm.image-dex2oat-flags "--compiler-filter=interpret-only"
+	adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
+	adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
 	adb shell start
 
@@ -429,8 +430,8 @@
 	adb root && sleep 3
 	adb shell stop
 	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=interpret-only"
-	adb shell setprop dalvik.vm.image-dex2oat-flags "--compiler-filter=interpret-only"
+	adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
+	adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
 	adb shell start
 
@@ -439,8 +440,8 @@
 	adb root && sleep 3
 	adb shell stop
 	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=verify-none"
-	adb shell setprop dalvik.vm.image-dex2oat-flags "--compiler-filter=verify-none"
+	adb shell setprop dalvik.vm.dex2oat-filter "verify-none"
+	adb shell setprop dalvik.vm.image-dex2oat-filter "verify-none"
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
 	adb shell start
 
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 346fbb8..dcc67c3 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -467,8 +467,13 @@
   kIsQuinOp,
   kIsSextupleOp,
   kIsIT,
+  kIsMoveOp,
   kMemLoad,
   kMemStore,
+  kMemVolatile,
+  kMemScaledx0,
+  kMemScaledx2,
+  kMemScaledx4,
   kPCRelFixup,  // x86 FIXME: add NEEDS_FIXUP to instruction attributes.
   kRegDef0,
   kRegDef1,
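
The new attribute bits refine the memory-op metadata: kIsMoveOp marks
register-to-register moves, kMemVolatile marks accesses with volatile or
barrier semantics, and kMemScaledx0/x2/x4 record how an instruction scales
its immediate offset. The encoding tables below consume these as compound
masks (IS_MOVE, IS_LOADX, IS_LOAD_OFF2, ...); a minimal sketch of how those
masks are presumably derived from the enum, assuming the usual
one-bit-per-attribute layout (the defining header is not part of this diff):

    // Sketch only: bit positions come from the attribute enum above.
    #define IS_MOVE           (UINT64_C(1) << kIsMoveOp)
    #define IS_VOLATILE       (UINT64_C(1) << kMemVolatile)
    #define SCALED_OFFSET_X0  (UINT64_C(1) << kMemScaledx0)
    #define SCALED_OFFSET_X2  (UINT64_C(1) << kMemScaledx2)
    #define SCALED_OFFSET_X4  (UINT64_C(1) << kMemScaledx4)
    // Compound flags used by the ARM/ARM64 encoding maps in this change.
    #define IS_LOAD_OFF       (IS_LOAD | SCALED_OFFSET_X0)   // unscaled offset
    #define IS_LOAD_OFF2      (IS_LOAD | SCALED_OFFSET_X2)   // offset scaled x2
    #define IS_LOAD_OFF4      (IS_LOAD | SCALED_OFFSET_X4)   // offset scaled x4
    #define IS_LOADX          (IS_LOAD | IS_VOLATILE)        // exclusive load
    #define IS_STORE_OFF      (IS_STORE | SCALED_OFFSET_X0)
    #define IS_STORE_OFF2     (IS_STORE | SCALED_OFFSET_X2)
    #define IS_STORE_OFF4     (IS_STORE | SCALED_OFFSET_X4)
    #define IS_STOREX         (IS_STORE | IS_VOLATILE)       // exclusive store
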
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 11dd182..4f8c1d4 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -42,7 +42,7 @@
 
 /* Default optimizer/debug setting for the compiler. */
 static uint32_t kCompilerOptimizerDisableFlags = 0 |  // Disable specific optimizations
-  (1 << kLoadStoreElimination) |  // TODO: this pass has been broken for awhile - fix or delete.
+  // (1 << kLoadStoreElimination) |
   // (1 << kLoadHoisting) |
   // (1 << kSuppressLoads) |
   // (1 << kNullCheckElimination) |
@@ -96,12 +96,12 @@
     ~0U,
     // 1 = kArm, unused (will use kThumb2).
     ~0U,
-    // 2 = kArm64.     TODO(Arm64): enable optimizations once backend is mature enough.
-    (1 << kLoadStoreElimination) |
+    // 2 = kArm64.
     0,
     // 3 = kThumb2.
     0,
     // 4 = kX86.
+    (1 << kLoadStoreElimination) |
     0,
     // 5 = kX86_64.
     (1 << kLoadStoreElimination) |
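
With the pass fixed, load/store elimination is re-enabled globally and per
ISA: arm64 no longer masks it out, while x86 joins x86_64 in disabling it
pending backend support. A sketch of how the two masks are presumably
combined (names other than kCompilerOptimizerDisableFlags are stand-ins for
the per-ISA table above, not the actual frontend code):

    // Illustrative only: a pass runs unless either mask disables it.
    uint32_t disabled = kCompilerOptimizerDisableFlags |
                        kDisabledOptsPerIsa[cu->instruction_set];
    if ((disabled & (1 << kLoadStoreElimination)) == 0) {
      // run load/store elimination on this method
    }
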
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 8d0a5a3..6aee563 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -69,6 +69,7 @@
 
 MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
     : reg_location_(NULL),
+      block_id_map_(std::less<unsigned int>(), arena->Adapter()),
       cu_(cu),
       ssa_base_vregs_(NULL),
       ssa_subscripts_(NULL),
@@ -101,11 +102,14 @@
       num_blocks_(0),
       current_code_item_(NULL),
       dex_pc_to_block_map_(arena, 0, kGrowableArrayMisc),
+      m_units_(arena->Adapter()),
+      method_stack_(arena->Adapter()),
       current_method_(kInvalidEntry),
       current_offset_(kInvalidEntry),
       def_count_(0),
       opcode_count_(NULL),
       num_ssa_regs_(0),
+      extended_basic_blocks_(arena->Adapter()),
       method_sreg_(0),
       attributes_(METHOD_IS_LEAF),  // Start with leaf assumption, change on encountering invoke.
       checkstats_(NULL),
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 768ae21..491d72e 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -27,6 +27,7 @@
 #include "mir_method_info.h"
 #include "utils/arena_bit_vector.h"
 #include "utils/growable_array.h"
+#include "utils/arena_containers.h"
 #include "utils/scoped_arena_containers.h"
 #include "reg_location.h"
 #include "reg_storage.h"
@@ -1051,8 +1052,8 @@
   std::set<uint32_t> catches_;
 
   // TODO: make these private.
-  RegLocation* reg_location_;                         // Map SSA names to location.
-  SafeMap<unsigned int, unsigned int> block_id_map_;  // Block collapse lookup cache.
+  RegLocation* reg_location_;                               // Map SSA names to location.
+  ArenaSafeMap<unsigned int, unsigned int> block_id_map_;   // Block collapse lookup cache.
 
   static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
   static const uint32_t analysis_attributes_[kMirOpLast];
@@ -1171,15 +1172,15 @@
   unsigned int num_blocks_;
   const DexFile::CodeItem* current_code_item_;
   GrowableArray<uint16_t> dex_pc_to_block_map_;  // FindBlock lookup cache.
-  std::vector<DexCompilationUnit*> m_units_;     // List of methods included in this graph
+  ArenaVector<DexCompilationUnit*> m_units_;     // List of methods included in this graph
   typedef std::pair<int, int> MIRLocation;       // Insert point, (m_unit_ index, offset)
-  std::vector<MIRLocation> method_stack_;        // Include stack
+  ArenaVector<MIRLocation> method_stack_;        // Include stack
   int current_method_;
   DexOffset current_offset_;                     // Offset in code units
   int def_count_;                                // Used to estimate size of ssa name storage.
   int* opcode_count_;                            // Dex opcode coverage stats.
   int num_ssa_regs_;                             // Number of names following SSA transformation.
-  std::vector<BasicBlockId> extended_basic_blocks_;  // Heads of block "traces".
+  ArenaVector<BasicBlockId> extended_basic_blocks_;  // Heads of block "traces".
   int method_sreg_;
   unsigned int attributes_;
   Checkstats* checkstats_;
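
The container migration swaps std::vector/SafeMap members for their
arena-backed equivalents, so MIRGraph bookkeeping is allocated from the
compilation arena and released wholesale with it. A condensed sketch of the
construction pattern, mirroring the MIRGraph::MIRGraph() initializer-list
changes above:

    // Arena-backed containers take an allocator adapter at construction.
    ArenaVector<DexCompilationUnit*> m_units(arena->Adapter());
    ArenaSafeMap<unsigned int, unsigned int> block_id_map(
        std::less<unsigned int>(), arena->Adapter());
    // Subsequent growth (push_back, Put) draws element storage from the
    // arena, so nothing here needs individual destruction.
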
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 5083bbc..35c3597 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -217,7 +217,7 @@
                  "ldmia", "!0C!!, <!1R>", 2, kFixupNone),
     ENCODING_MAP(kThumbLdrRRI5,      0x6800,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF4,
                  "ldr", "!0C, [!1C, #!2E]", 2, kFixupNone),
     ENCODING_MAP(kThumbLdrRRR,       0x5800,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -226,14 +226,14 @@
     ENCODING_MAP(kThumbLdrPcRel,    0x4800,
                  kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC
-                 | IS_LOAD | NEEDS_FIXUP, "ldr", "!0C, [pc, #!1E]", 2, kFixupLoad),
+                 | IS_LOAD_OFF4 | NEEDS_FIXUP, "ldr", "!0C, [pc, #!1E]", 2, kFixupLoad),
     ENCODING_MAP(kThumbLdrSpRel,    0x9800,
                  kFmtBitBlt, 10, 8, kFmtSkip, -1, -1, kFmtBitBlt, 7, 0,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_SP
-                 | IS_LOAD, "ldr", "!0C, [sp, #!2E]", 2, kFixupNone),
+                 | IS_LOAD_OFF4, "ldr", "!0C, [sp, #!2E]", 2, kFixupNone),
     ENCODING_MAP(kThumbLdrbRRI5,     0x7800,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrb", "!0C, [!1C, #2d]", 2, kFixupNone),
     ENCODING_MAP(kThumbLdrbRRR,      0x5c00,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -241,7 +241,7 @@
                  "ldrb", "!0C, [!1C, !2C]", 2, kFixupNone),
     ENCODING_MAP(kThumbLdrhRRI5,     0x8800,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF2,
                  "ldrh", "!0C, [!1C, #!2F]", 2, kFixupNone),
     ENCODING_MAP(kThumbLdrhRRR,      0x5a00,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -283,19 +283,19 @@
     ENCODING_MAP(kThumbMovRR,        0x1c00,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+                 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES | IS_MOVE,
                  "movs", "!0C, !1C", 2, kFixupNone),
     ENCODING_MAP(kThumbMovRR_H2H,    0x46c0,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "mov", "!0C, !1C", 2, kFixupNone),
     ENCODING_MAP(kThumbMovRR_H2L,    0x4640,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "mov", "!0C, !1C", 2, kFixupNone),
     ENCODING_MAP(kThumbMovRR_L2H,    0x4680,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "mov", "!0C, !1C", 2, kFixupNone),
     ENCODING_MAP(kThumbMul,           0x4340,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
@@ -354,7 +354,7 @@
                  "stmia", "!0C!!, <!1R>", 2, kFixupNone),
     ENCODING_MAP(kThumbStrRRI5,      0x6000,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
                  "str", "!0C, [!1C, #!2E]", 2, kFixupNone),
     ENCODING_MAP(kThumbStrRRR,       0x5000,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -363,10 +363,10 @@
     ENCODING_MAP(kThumbStrSpRel,    0x9000,
                  kFmtBitBlt, 10, 8, kFmtSkip, -1, -1, kFmtBitBlt, 7, 0,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE0 | REG_USE_SP
-                 | IS_STORE, "str", "!0C, [sp, #!2E]", 2, kFixupNone),
+                 | IS_STORE_OFF4, "str", "!0C, [sp, #!2E]", 2, kFixupNone),
     ENCODING_MAP(kThumbStrbRRI5,     0x7000,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "strb", "!0C, [!1C, #!2d]", 2, kFixupNone),
     ENCODING_MAP(kThumbStrbRRR,      0x5400,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -374,7 +374,7 @@
                  "strb", "!0C, [!1C, !2C]", 2, kFixupNone),
     ENCODING_MAP(kThumbStrhRRI5,     0x8000,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF2,
                  "strh", "!0C, [!1C, #!2F]", 2, kFixupNone),
     ENCODING_MAP(kThumbStrhRRR,      0x5200,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -423,11 +423,11 @@
      */
     ENCODING_MAP(kThumb2Vldrs,       0xed900a00,
                  kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD |
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF4 |
                  REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0s, [!1C, #!2E]", 4, kFixupVLoad),
     ENCODING_MAP(kThumb2Vldrd,       0xed900b00,
                  kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD |
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF |
                  REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0S, [!1C, #!2E]", 4, kFixupVLoad),
     ENCODING_MAP(kThumb2Vmuls,        0xee200a00,
                  kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
@@ -440,11 +440,11 @@
                  "vmuld", "!0S, !1S, !2S", 4, kFixupNone),
     ENCODING_MAP(kThumb2Vstrs,       0xed800a00,
                  kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
                  "vstr", "!0s, [!1C, #!2E]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Vstrd,       0xed800b00,
                  kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
                  "vstr", "!0S, [!1C, #!2E]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Vsubs,        0xee300a40,
                  kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
@@ -520,19 +520,19 @@
                  "mov", "!0C, #!1M", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrRRI12,       0xf8c00000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "str", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrRRI12,       0xf8d00000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldr", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrRRI8Predec,       0xf8400c00,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "str", "!0C, [!1C, #-!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrRRI8Predec,       0xf8500c00,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldr", "!0C, [!1C, #-!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Cbnz,       0xb900, /* Note: does not affect flags */
                  kFmtBitBlt, 2, 0, kFmtImm6, -1, -1, kFmtUnused, -1, -1,
@@ -549,15 +549,15 @@
                  "add", "!0C,!1C,#!2d", 4, kFixupNone),
     ENCODING_MAP(kThumb2MovRR,       0xea4f0000, /* no setflags encoding */
                  kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "mov", "!0C, !1C", 4, kFixupNone),
     ENCODING_MAP(kThumb2Vmovs,       0xeeb00a40,
                  kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "vmov.f32 ", " !0s, !1s", 4, kFixupNone),
     ENCODING_MAP(kThumb2Vmovd,       0xeeb00b40,
                  kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "vmov.f64 ", " !0S, !1S", 4, kFixupNone),
     ENCODING_MAP(kThumb2Ldmia,         0xe8900000,
                  kFmtBitBlt, 19, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
@@ -613,59 +613,59 @@
                  "sbfx", "!0C, !1C, #!2d, #!3d", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrRRR,    0xf8500000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldr", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrhRRR,    0xf8300000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldrh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrshRRR,    0xf9300000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldrsh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrbRRR,    0xf8100000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldrb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrsbRRR,    0xf9100000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldrsb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrRRR,    0xf8400000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
                  "str", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrhRRR,    0xf8200000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
                  "strh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrbRRR,    0xf8000000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
                  "strb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrhRRI12,       0xf8b00000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrh", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrshRRI12,       0xf9b00000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrsh", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrbRRI12,       0xf8900000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrb", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrsbRRI12,       0xf9900000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrsb", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrhRRI12,       0xf8a00000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "strh", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrbRRI12,       0xf8800000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "strb", "!0C, [!1C, #!2d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Pop,           0xe8bd0000,
                  kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
@@ -841,7 +841,7 @@
     ENCODING_MAP(kThumb2LdrPcRel12,       0xf8df0000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
+                 IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD_OFF | NEEDS_FIXUP,
                  "ldr", "!0C, [r15pc, #!1d]", 4, kFixupLoad),
     ENCODING_MAP(kThumb2BCond,        0xf0008000,
                  kFmtBrOffset, -1, -1, kFmtBitBlt, 25, 22, kFmtUnused, -1, -1,
@@ -899,19 +899,19 @@
                  "umull", "!0C, !1C, !2C, !3C", 4, kFixupNone),
     ENCODING_MAP(kThumb2Ldrex,       0xe8500f00,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOADX,
                  "ldrex", "!0C, [!1C, #!2E]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Ldrexd,      0xe8d0007f,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF01_USE2 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF01_USE2 | IS_LOADX,
                  "ldrexd", "!0C, !1C, [!2C]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Strex,       0xe8400000,
                  kFmtBitBlt, 11, 8, kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16,
-                 kFmtBitBlt, 7, 0, IS_QUAD_OP | REG_DEF0_USE12 | IS_STORE,
+                 kFmtBitBlt, 7, 0, IS_QUAD_OP | REG_DEF0_USE12 | IS_STOREX,
                  "strex", "!0C, !1C, [!2C, #!2E]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Strexd,      0xe8c00070,
                  kFmtBitBlt, 3, 0, kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8,
-                 kFmtBitBlt, 19, 16, IS_QUAD_OP | REG_DEF0_USE123 | IS_STORE,
+                 kFmtBitBlt, 19, 16, IS_QUAD_OP | REG_DEF0_USE123 | IS_STOREX,
                  "strexd", "!0C, !1C, !2C, [!3C]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Clrex,       0xf3bf8f2f,
                  kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
@@ -927,12 +927,12 @@
                  "bfc", "!0C,#!1d,#!2d", 4, kFixupNone),
     ENCODING_MAP(kThumb2Dmb,         0xf3bf8f50,
                  kFmtBitBlt, 3, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_VOLATILE,
                  "dmb", "#!0B", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrPcReln12,       0xf85f0000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD,
+                 IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD_OFF,
                  "ldr", "!0C, [r15pc, -#!1d]", 4, kFixupNone),
     ENCODING_MAP(kThumb2Stm,          0xe9000000,
                  kFmtBitBlt, 19, 16, kFmtBitBlt, 12, 0, kFmtUnused, -1, -1,
@@ -1023,17 +1023,17 @@
     ENCODING_MAP(kThumb2LdrdPcRel8,  0xe9df0000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 7, 0,
                  kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0 | REG_DEF1 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
+                 IS_TERTIARY_OP | REG_DEF0 | REG_DEF1 | REG_USE_PC | IS_LOAD_OFF4 | NEEDS_FIXUP,
                  "ldrd", "!0C, !1C, [pc, #!2E]", 4, kFixupLoad),
     ENCODING_MAP(kThumb2LdrdI8, 0xe9d00000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
                  kFmtBitBlt, 7, 0,
-                 IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | IS_LOAD,
+                 IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | IS_LOAD_OFF4,
                  "ldrd", "!0C, !1C, [!2C, #!3E]", 4, kFixupNone),
     ENCODING_MAP(kThumb2StrdI8, 0xe9c00000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
                  kFmtBitBlt, 7, 0,
-                 IS_QUAD_OP | REG_USE0 | REG_USE1 | REG_USE2 | IS_STORE,
+                 IS_QUAD_OP | REG_USE0 | REG_USE1 | REG_USE2 | IS_STORE_OFF4,
                  "strd", "!0C, !1C, [!2C, #!3E]", 4, kFixupNone),
 };
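
Each ENCODING_MAP row pairs a skeleton opcode word with up to four field
descriptors; a kFmtBitBlt descriptor names the high and low bit of the slice
its operand is deposited into. A rough sketch of that bit-blitting step,
assuming the usual table-driven assembler shape (not the literal AssembleLIR
code):

    // Sketch: deposit 'operand' into insn bits [end..start], as a
    // descriptor such as "kFmtBitBlt, 19, 16" directs.
    uint32_t BitBlt(uint32_t insn, uint32_t operand, int end, int start) {
      uint32_t width = static_cast<uint32_t>(end - start + 1);
      uint32_t mask = (width >= 32) ? ~0u : (1u << width) - 1u;
      return insn | ((operand & mask) << start);
    }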
 
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 9652192..21322a6 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -84,6 +84,8 @@
     RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
 
     // Required for target - Dalvik-level generators.
+    void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                        RegLocation rl_src2) OVERRIDE;
     void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2);
     void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
@@ -92,12 +94,6 @@
                      RegLocation rl_src, int scale, bool card_mark);
     void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_shift);
-    void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
     void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2);
     void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -112,16 +108,6 @@
     bool GenInlinedSqrt(CallInfo* info);
     bool GenInlinedPeek(CallInfo* info, OpSize size);
     bool GenInlinedPoke(CallInfo* info, OpSize size);
-    void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                   RegLocation rl_src2);
-    void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                       RegLocation rl_src2, bool is_div);
     RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
     RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
     void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
@@ -198,8 +184,12 @@
     }
 
     LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+    size_t GetInstructionOffset(LIR* lir);
 
   private:
+    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+    void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                    RegLocation rl_src2);
     void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, int64_t val,
                                   ConditionCode ccode);
     LIR* LoadFPConstantValue(int r_dest, int value);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index dd14ed9..6711ab3 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1039,15 +1039,6 @@
 #endif
 }
 
-void ArmMir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
-  LOG(FATAL) << "Unexpected use GenNotLong()";
-}
-
-void ArmMir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2, bool is_div) {
-  LOG(FATAL) << "Unexpected use GenDivRemLong()";
-}
-
 void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
   rl_src = LoadValueWide(rl_src, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -1173,29 +1164,23 @@
     StoreValueWide(rl_dest, rl_result);
 }
 
-void ArmMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
-}
+void ArmMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                                RegLocation rl_src2) {
+  switch (opcode) {
+    case Instruction::MUL_LONG:
+    case Instruction::MUL_LONG_2ADDR:
+      GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::NEG_LONG:
+      GenNegLong(rl_dest, rl_src2);
+      return;
 
-void ArmMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
-}
+    default:
+      break;
+  }
 
-void ArmMir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
-}
-
-void ArmMir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
-}
-
-void ArmMir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of genXoLong for Arm";
+  // Fallback for all other ops.
+  Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
 }
 
 /*
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index cf21da7..bba1a8c 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -1169,4 +1169,17 @@
   return OpReg(op, r_tgt);
 }
 
+size_t ArmMir2Lir::GetInstructionOffset(LIR* lir) {
+  uint64_t check_flags = GetTargetInstFlags(lir->opcode);
+  DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
+  size_t offset = (check_flags & IS_TERTIARY_OP) ? lir->operands[2] : 0;
+
+  if (check_flags & SCALED_OFFSET_X2) {
+    offset = offset * 2;
+  } else if (check_flags & SCALED_OFFSET_X4) {
+    offset = offset * 4;
+  }
+  return offset;
+}
+
 }  // namespace art
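
GetInstructionOffset() recovers the byte offset encoded in a load/store LIR,
undoing the implicit scaling the Thumb encodings apply (the
SCALED_OFFSET_X2/X4 bits set via IS_LOAD_OFF2/IS_LOAD_OFF4 above). A hedged
usage sketch for an optimization comparing two memory LIRs; the helper is
illustrative, not part of this change:

    // Sketch: two fixed-offset accesses off the same base register can
    // only refer to the same slot when their normalized offsets match.
    bool SameSlot(Mir2Lir* cg, LIR* a, LIR* b) {
      return a->operands[1] == b->operands[1] &&  // same base register
             cg->GetInstructionOffset(a) == cg->GetInstructionOffset(b);
    }
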
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 3a8ea3f..a449cbd 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -267,6 +267,8 @@
   kA64Fcvtzs2xf,     // fcvtzs [100111100s111000000000] rn[9-5] rd[4-0].
   kA64Fcvt2Ss,       // fcvt   [0001111000100010110000] rn[9-5] rd[4-0].
   kA64Fcvt2sS,       // fcvt   [0001111001100010010000] rn[9-5] rd[4-0].
+  kA64Fcvtms2ws,     // fcvtms [0001111000110000000000] rn[9-5] rd[4-0].
+  kA64Fcvtms2xS,     // fcvtms [1001111001110000000000] rn[9-5] rd[4-0].
   kA64Fdiv3fff,      // fdiv[000111100s1] rm[20-16] [000110] rn[9-5] rd[4-0].
   kA64Fmax3fff,      // fmax[000111100s1] rm[20-16] [010010] rn[9-5] rd[4-0].
   kA64Fmin3fff,      // fmin[000111100s1] rm[20-16] [010110] rn[9-5] rd[4-0].
@@ -278,6 +280,9 @@
   kA64Fmov2xS,       // fmov[1001111001101111000000] rn[9-5] rd[4-0].
   kA64Fmul3fff,      // fmul[000111100s1] rm[20-16] [000010] rn[9-5] rd[4-0].
   kA64Fneg2ff,       // fneg[000111100s100001010000] rn[9-5] rd[4-0].
+  kA64Frintp2ff,     // frintp [000111100s100100110000] rn[9-5] rd[4-0].
+  kA64Frintm2ff,     // frintm [000111100s100101010000] rn[9-5] rd[4-0].
+  kA64Frintn2ff,     // frintn [000111100s100100010000] rn[9-5] rd[4-0].
   kA64Frintz2ff,     // frintz [000111100s100101110000] rn[9-5] rd[4-0].
   kA64Fsqrt2ff,      // fsqrt[000111100s100001110000] rn[9-5] rd[4-0].
   kA64Fsub3fff,      // fsub[000111100s1] rm[20-16] [001110] rn[9-5] rd[4-0].
@@ -331,6 +336,7 @@
   kA64Stp4ffXD,      // stp [0s10110100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
   kA64Stp4rrXD,      // stp [s010100100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
   kA64StpPost4rrXD,  // stp [s010100010] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
+  kA64StpPre4ffXD,   // stp [0s10110110] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
   kA64StpPre4rrXD,   // stp [s010100110] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
   kA64Str3fXD,       // str [1s11110100] imm_12[21-10] rn[9-5] rt[4-0].
   kA64Str4fXxG,      // str [1s111100001] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
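
The new opcodes extend A64 float coverage: fcvtms converts to integer
rounding toward minus infinity, and frintp/frintm/frintn round to an
integral float with ceiling, floor, and nearest-even modes. These look aimed
at rounding intrinsics (an inference from the opcode set; this diff does not
show the call sites). A sketch of a plausible lowering; the method name and
InlineTargetWide usage are assumptions:

    // Sketch: lowering a floor(double) intrinsic with the new frintm.
    bool Arm64Mir2Lir::GenInlinedFloor(CallInfo* info) {
      RegLocation rl_src = LoadValueWide(info->args[0], kFPReg);
      RegLocation rl_dest = InlineTargetWide(info);
      RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
      // FWIDE selects the double-precision variant, per the table.
      NewLIR2(FWIDE(kA64Frintm2ff), rl_result.reg.GetReg(),
              rl_src.reg.GetReg());
      StoreValueWide(rl_dest, rl_result);
      return true;
    }
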
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 462be54..15c89f2 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -214,7 +214,7 @@
                  "csneg", "!0r, !1r, !2r, !3c", kFixupNone),
     ENCODING_MAP(kA64Dmb1B, NO_VARIANTS(0xd50330bf),
                  kFmtBitBlt, 11, 8, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_VOLATILE,
                  "dmb", "#!0B", kFixupNone),
     ENCODING_MAP(WIDE(kA64Eor3Rrl), SF_VARIANTS(0x52000000),
                  kFmtRegROrSp, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 22, 10,
@@ -260,6 +260,14 @@
                  kFmtRegS, 4, 0, kFmtRegD, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "fcvt", "!0s, !1S", kFixupNone),
+    ENCODING_MAP(kA64Fcvtms2ws, NO_VARIANTS(0x1e300000),
+                 kFmtRegW, 4, 0, kFmtRegS, 9, 5, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "fcvtms", "!0w, !1s", kFixupNone),
+    ENCODING_MAP(kA64Fcvtms2xS, NO_VARIANTS(0x9e700000),
+                 kFmtRegX, 4, 0, kFmtRegD, 9, 5, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "fcvtms", "!0x, !1S", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Fdiv3fff), FLOAT_VARIANTS(0x1e201800),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
@@ -274,7 +282,7 @@
                  "fmin", "!0f, !1f, !2f", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Fmov2ff), FLOAT_VARIANTS(0x1e204000),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "fmov", "!0f, !1f", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Fmov2fI), FLOAT_VARIANTS(0x1e201000),
                  kFmtRegF, 4, 0, kFmtBitBlt, 20, 13, kFmtUnused, -1, -1,
@@ -304,6 +312,18 @@
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "fneg", "!0f, !1f", kFixupNone),
+    ENCODING_MAP(FWIDE(kA64Frintp2ff), FLOAT_VARIANTS(0x1e24c000),
+                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "frintp", "!0f, !1f", kFixupNone),
+    ENCODING_MAP(FWIDE(kA64Frintm2ff), FLOAT_VARIANTS(0x1e254000),
+                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "frintm", "!0f, !1f", kFixupNone),
+    ENCODING_MAP(FWIDE(kA64Frintn2ff), FLOAT_VARIANTS(0x1e244000),
+                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "frintn", "!0f, !1f", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Frintz2ff), FLOAT_VARIANTS(0x1e25c000),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
@@ -318,7 +338,7 @@
                  "fsub", "!0f, !1f, !2f", kFixupNone),
     ENCODING_MAP(kA64Ldrb3wXd, NO_VARIANTS(0x39400000),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrb", "!0w, [!1X, #!2d]", kFixupNone),
     ENCODING_MAP(kA64Ldrb3wXx, NO_VARIANTS(0x38606800),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -326,7 +346,7 @@
                  "ldrb", "!0w, [!1X, !2x]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldrsb3rXd), CUSTOM_VARIANTS(0x39c00000, 0x39800000),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrsb", "!0r, [!1X, #!2d]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldrsb3rXx), CUSTOM_VARIANTS(0x38e06800, 0x38a06800),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -334,19 +354,19 @@
                  "ldrsb", "!0r, [!1X, !2x]", kFixupNone),
     ENCODING_MAP(kA64Ldrh3wXF, NO_VARIANTS(0x79400000),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrh", "!0w, [!1X, #!2F]", kFixupNone),
     ENCODING_MAP(kA64Ldrh4wXxd, NO_VARIANTS(0x78606800),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
-                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldrh", "!0w, [!1X, !2x, lsl #!3d]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldrsh3rXF), CUSTOM_VARIANTS(0x79c00000, 0x79800000),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldrsh", "!0r, [!1X, #!2F]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldrsh4rXxd), CUSTOM_VARIANTS(0x78e06800, 0x78906800),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
-                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldrsh", "!0r, [!1X, !2x, lsl #!3d]", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Ldr2fp), SIZE_VARIANTS(0x1c000000),
                  kFmtRegF, 4, 0, kFmtBitBlt, 23, 5, kFmtUnused, -1, -1,
@@ -360,11 +380,11 @@
                  "ldr", "!0r, !1p", kFixupLoad),
     ENCODING_MAP(FWIDE(kA64Ldr3fXD), SIZE_VARIANTS(0xbd400000),
                  kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldr", "!0f, [!1X, #!2D]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldr3rXD), SIZE_VARIANTS(0xb9400000),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldr", "!0r, [!1X, #!2D]", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Ldr4fXxG), SIZE_VARIANTS(0xbc606800),
                  kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -380,11 +400,11 @@
                  "ldr", "!0r, [!1X], #!2d", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldp4ffXD), CUSTOM_VARIANTS(0x2d400000, 0x6d400000),
                  kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD,
+                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD_OFF,
                  "ldp", "!0f, !1f, [!2X, #!3D]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldp4rrXD), SF_VARIANTS(0x29400000),
                  kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD,
+                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD_OFF,
                  "ldp", "!0r, !1r, [!2X, #!3D]", kFixupNone),
     ENCODING_MAP(WIDE(kA64LdpPost4rrXD), CUSTOM_VARIANTS(0x28c00000, 0xa8c00000),
                  kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
@@ -400,11 +420,11 @@
                  "ldur", "!0r, [!1X, #!2d]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldxr2rX), SIZE_VARIANTS(0x885f7c00),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOADX,
                  "ldxr", "!0r, [!1X]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Ldaxr2rX), SIZE_VARIANTS(0x885ffc00),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOAD,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOADX,
                  "ldaxr", "!0r, [!1X]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Lsl3rrr), SF_VARIANTS(0x1ac02000),
                  kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
@@ -432,7 +452,7 @@
                  "movz", "!0r, #!1d!2M", kFixupNone),
     ENCODING_MAP(WIDE(kA64Mov2rr), SF_VARIANTS(0x2a0003e0),
                  kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "mov", "!0r, !1r", kFixupNone),
     ENCODING_MAP(WIDE(kA64Mvn2rr), SF_VARIANTS(0x2a2003e0),
                  kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtUnused, -1, -1,
@@ -508,23 +528,27 @@
                  "smulh", "!0x, !1x, !2x", kFixupNone),
     ENCODING_MAP(WIDE(kA64Stp4ffXD), CUSTOM_VARIANTS(0x2d000000, 0x6d000000),
                  kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE,
+                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
                  "stp", "!0f, !1f, [!2X, #!3D]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Stp4rrXD), SF_VARIANTS(0x29000000),
                  kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE,
+                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
                  "stp", "!0r, !1r, [!2X, #!3D]", kFixupNone),
     ENCODING_MAP(WIDE(kA64StpPost4rrXD), CUSTOM_VARIANTS(0x28800000, 0xa8800000),
                  kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
                  kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
                  "stp", "!0r, !1r, [!2X], #!3D", kFixupNone),
+    ENCODING_MAP(WIDE(kA64StpPre4ffXD), CUSTOM_VARIANTS(0x2d800000, 0x6d800000),
+                 kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
+                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
+                 "stp", "!0f, !1f, [!2X, #!3D]!!", kFixupNone),
     ENCODING_MAP(WIDE(kA64StpPre4rrXD), CUSTOM_VARIANTS(0x29800000, 0xa9800000),
                  kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
                  kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
                  "stp", "!0r, !1r, [!2X, #!3D]!!", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Str3fXD), CUSTOM_VARIANTS(0xbd000000, 0xfd000000),
                  kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "str", "!0f, [!1X, #!2D]", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Str4fXxG), CUSTOM_VARIANTS(0xbc206800, 0xfc206800),
                  kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -532,7 +556,7 @@
                  "str", "!0f, [!1X, !2x!3G]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Str3rXD), SIZE_VARIANTS(0xb9000000),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "str", "!0r, [!1X, #!2D]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Str4rXxG), SIZE_VARIANTS(0xb8206800),
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -540,7 +564,7 @@
                  "str", "!0r, [!1X, !2x!3G]", kFixupNone),
     ENCODING_MAP(kA64Strb3wXd, NO_VARIANTS(0x39000000),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "strb", "!0w, [!1X, #!2d]", kFixupNone),
     ENCODING_MAP(kA64Strb3wXx, NO_VARIANTS(0x38206800),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -548,7 +572,7 @@
                  "strb", "!0w, [!1X, !2x]", kFixupNone),
     ENCODING_MAP(kA64Strh3wXF, NO_VARIANTS(0x79000000),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "strh", "!0w, [!1X, #!2F]", kFixupNone),
     ENCODING_MAP(kA64Strh4wXxd, NO_VARIANTS(0x78206800),
                  kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -568,11 +592,11 @@
                  "stur", "!0r, [!1X, #!2d]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Stxr3wrX), SIZE_VARIANTS(0x88007c00),
                  kFmtRegW, 20, 16, kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STOREX,
                  "stxr", "!0w, !1r, [!2X]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Stlxr3wrX), SIZE_VARIANTS(0x8800fc00),
                  kFmtRegW, 20, 16, kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STORE,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STOREX,
                  "stlxr", "!0w, !1r, [!2X]", kFixupNone),
     ENCODING_MAP(WIDE(kA64Sub4RRdT), SF_VARIANTS(0x51000000),
                  kFmtRegROrSp, 4, 0, kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10,
@@ -723,6 +747,7 @@
                              << " @ 0x" << std::hex << lir->dalvik_offset;
                 if (kFailOnSizeError) {
                   LOG(FATAL) << "Bad argument n. " << i << " of " << encoder->name
+                             << "(" << UNWIDE(encoder->opcode) << ", " << encoder->fmt << ")"
                              << ". Expected " << expected << ", got 0x" << std::hex << operand;
                 } else {
                   LOG(WARNING) << "Bad argument n. " << i << " of " << encoder->name
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index e584548..6fa8a4a 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -330,19 +330,14 @@
 
   NewLIR0(kPseudoMethodEntry);
 
-  const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm64) -
-      Thread::kStackOverflowSignalReservedBytes;
-  const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes;
   const int spill_count = num_core_spills_ + num_fp_spills_;
   const int spill_size = (spill_count * kArm64PointerSize + 15) & ~0xf;  // SP 16 byte alignment.
   const int frame_size_without_spills = frame_size_ - spill_size;
 
   if (!skip_overflow_check) {
     if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
-      if (!large_frame) {
-        // Load stack limit
-        LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP1);
-      }
+      // Load stack limit
+      LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP1);
     } else {
       // TODO(Arm64) Implement implicit checks.
       // Implicit stack overflow check.
@@ -350,24 +345,21 @@
       // redzone we will get a segmentation fault.
       // Load32Disp(rs_wSP, -Thread::kStackOverflowReservedBytes, rs_wzr);
       // MarkPossibleStackOverflowException();
+      //
+      // TODO: If the frame size is small enough, is it possible to make this a pre-indexed load,
+      //       so that we can avoid the following "sub sp" when spilling?
       LOG(FATAL) << "Implicit stack overflow checks not implemented.";
     }
   }
 
-  if (frame_size_ > 0) {
-    OpRegImm64(kOpSub, rs_sp, spill_size);
+  int spilled_already = 0;
+  if (spill_size > 0) {
+    spilled_already = SpillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);
+    DCHECK(spill_size == spilled_already || frame_size_ == spilled_already);
   }
 
-  /* Need to spill any FP regs? */
-  if (fp_spill_mask_) {
-    int spill_offset = spill_size - kArm64PointerSize*(num_fp_spills_ + num_core_spills_);
-    SpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
-  }
-
-  /* Spill core callee saves. */
-  if (core_spill_mask_) {
-    int spill_offset = spill_size - kArm64PointerSize*num_core_spills_;
-    SpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
+  if (spilled_already != frame_size_) {
+    OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
   }
 
   if (!skip_overflow_check) {
@@ -396,29 +388,9 @@
         const size_t sp_displace_;
       };
 
-      if (large_frame) {
-        // Compare Expected SP against bottom of stack.
-        // Branch to throw target if there is not enough room.
-        OpRegRegImm(kOpSub, rs_xIP1, rs_sp, frame_size_without_spills);
-        LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP0);
-        LIR* branch = OpCmpBranch(kCondUlt, rs_xIP1, rs_xIP0, nullptr);
-        AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_size));
-        OpRegCopy(rs_sp, rs_xIP1);  // Establish stack after checks.
-      } else {
-        /*
-         * If the frame is small enough we are guaranteed to have enough space that remains to
-         * handle signals on the user stack.
-         * Establishes stack before checks.
-         */
-        OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size_without_spills);
-        LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_xIP1, nullptr);
-        AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
-      }
-    } else {
-      OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
+      LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_xIP1, nullptr);
+      AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
     }
-  } else {
-    OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
   }
 
   FlushIns(ArgLocs, rl_method);
@@ -445,57 +417,7 @@
 
   NewLIR0(kPseudoMethodExit);
 
-  // Restore saves and drop stack frame.
-  // 2 versions:
-  //
-  // 1. (Original): Try to address directly, then drop the whole frame.
-  //                Limitation: ldp is a 7b signed immediate. There should have been a DCHECK!
-  //
-  // 2. (New): Drop the non-save-part. Then do similar to original, which is now guaranteed to be
-  //           in range. Then drop the rest.
-  //
-  // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
-  //       in variant 1.
-
-  if (frame_size_ <= 504) {
-    // "Magic" constant, 63 (max signed 7b) * 8. Do variant 1.
-    // Could be tighter, as the last load is below frame_size_ offset.
-    if (fp_spill_mask_) {
-      int spill_offset = frame_size_ - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
-      UnSpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
-    }
-    if (core_spill_mask_) {
-      int spill_offset = frame_size_ - kArm64PointerSize * num_core_spills_;
-      UnSpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
-    }
-
-    OpRegImm64(kOpAdd, rs_sp, frame_size_);
-  } else {
-    // Second variant. Drop the frame part.
-    int drop = 0;
-    // TODO: Always use the first formula, as num_fp_spills would be zero?
-    if (fp_spill_mask_) {
-      drop = frame_size_ - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
-    } else {
-      drop = frame_size_ - kArm64PointerSize * num_core_spills_;
-    }
-
-    // Drop needs to be 16B aligned, so that SP keeps aligned.
-    drop = RoundDown(drop, 16);
-
-    OpRegImm64(kOpAdd, rs_sp, drop);
-
-    if (fp_spill_mask_) {
-      int offset = frame_size_ - drop - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
-      UnSpillFPRegs(rs_sp, offset, fp_spill_mask_);
-    }
-    if (core_spill_mask_) {
-      int offset = frame_size_ - drop - kArm64PointerSize * num_core_spills_;
-      UnSpillCoreRegs(rs_sp, offset, core_spill_mask_);
-    }
-
-    OpRegImm64(kOpAdd, rs_sp, frame_size_ - drop);
-  }
+  UnspillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);
 
   // Finally return.
   NewLIR0(kA64Ret);
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index ac36519..b182cc0 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -59,328 +59,340 @@
     bool initialized_;
   };
 
-  public:
-    Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
+ public:
+  Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
 
-    // Required for target - codegen helpers.
-    bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
-                            RegLocation rl_dest, int lit) OVERRIDE;
-    bool SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
-                              RegLocation rl_dest, int64_t lit);
-    bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
-                          RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
-    bool HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
-                            RegLocation rl_src, RegLocation rl_dest, int64_t lit);
-    bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
-    LIR* CheckSuspendUsingLoad() OVERRIDE;
-    RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
-    LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
-                      OpSize size, VolatileKind is_volatile) OVERRIDE;
-    LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
-                     VolatileKind is_volatile)
-        OVERRIDE;
-    LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
-                         OpSize size) OVERRIDE;
-    LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale)
-        OVERRIDE;
-    LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
-    LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
-    LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
-                       OpSize size, VolatileKind is_volatile) OVERRIDE;
-    LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
-                      VolatileKind is_volatile) OVERRIDE;
-    LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
-                          OpSize size) OVERRIDE;
-    LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale)
-        OVERRIDE;
-    void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
-    LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
-                           int offset, int check_value, LIR* target, LIR** compare) OVERRIDE;
+  // Required for target - codegen helpers.
+  bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
+                          RegLocation rl_dest, int lit) OVERRIDE;
+  bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
+                        RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+  bool HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
+                          RegLocation rl_src, RegLocation rl_dest, int64_t lit);
+  bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+  LIR* CheckSuspendUsingLoad() OVERRIDE;
+  RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
+  LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+                    OpSize size, VolatileKind is_volatile) OVERRIDE;
+  LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+                   VolatileKind is_volatile) OVERRIDE;
+  LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
+                       OpSize size) OVERRIDE;
+  LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale)
+      OVERRIDE;
+  LIR* LoadConstantNoClobber(RegStorage r_dest, int value) OVERRIDE;
+  LIR* LoadConstantWide(RegStorage r_dest, int64_t value) OVERRIDE;
+  LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
+                     VolatileKind is_volatile) OVERRIDE;
+  LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src, VolatileKind is_volatile)
+      OVERRIDE;
+  LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
+                        OpSize size) OVERRIDE;
+  LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale) OVERRIDE;
+  void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
+  LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
+                         int offset, int check_value, LIR* target, LIR** compare) OVERRIDE;
 
-    // Required for target - register utilities.
-    RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
-    RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
-      if (wide_kind == kWide || wide_kind == kRef) {
-        return As64BitReg(TargetReg(symbolic_reg));
-      } else {
-        return Check32BitReg(TargetReg(symbolic_reg));
-      }
-    }
-    RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
+  // Required for target - register utilities.
+  RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
+  RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
+    if (wide_kind == kWide || wide_kind == kRef) {
       return As64BitReg(TargetReg(symbolic_reg));
+    } else {
+      return Check32BitReg(TargetReg(symbolic_reg));
     }
-    RegStorage GetArgMappingToPhysicalReg(int arg_num);
-    RegLocation GetReturnAlt();
-    RegLocation GetReturnWideAlt();
-    RegLocation LocCReturn();
-    RegLocation LocCReturnRef();
-    RegLocation LocCReturnDouble();
-    RegLocation LocCReturnFloat();
-    RegLocation LocCReturnWide();
-    ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
-    void AdjustSpillMask();
-    void ClobberCallerSave();
-    void FreeCallTemps();
-    void LockCallTemps();
-    void CompilerInitializeRegAlloc();
+  }
+  RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
+    return As64BitReg(TargetReg(symbolic_reg));
+  }
+  RegStorage GetArgMappingToPhysicalReg(int arg_num) OVERRIDE;
+  RegLocation GetReturnAlt() OVERRIDE;
+  RegLocation GetReturnWideAlt() OVERRIDE;
+  RegLocation LocCReturn() OVERRIDE;
+  RegLocation LocCReturnRef() OVERRIDE;
+  RegLocation LocCReturnDouble() OVERRIDE;
+  RegLocation LocCReturnFloat() OVERRIDE;
+  RegLocation LocCReturnWide() OVERRIDE;
+  ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
+  void AdjustSpillMask() OVERRIDE;
+  void ClobberCallerSave() OVERRIDE;
+  void FreeCallTemps() OVERRIDE;
+  void LockCallTemps() OVERRIDE;
+  void CompilerInitializeRegAlloc() OVERRIDE;
 
-    // Required for target - miscellaneous.
-    void AssembleLIR();
-    uint32_t LinkFixupInsns(LIR* head_lir, LIR* tail_lir, CodeOffset offset);
-    int AssignInsnOffsets();
-    void AssignOffsets();
-    uint8_t* EncodeLIRs(uint8_t* write_pos, LIR* lir);
-    void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
-    void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
-                                  ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
-    const char* GetTargetInstFmt(int opcode);
-    const char* GetTargetInstName(int opcode);
-    std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
-    ResourceMask GetPCUseDefEncoding() const OVERRIDE;
-    uint64_t GetTargetInstFlags(int opcode);
-    size_t GetInsnSize(LIR* lir) OVERRIDE;
-    bool IsUnconditionalBranch(LIR* lir);
+  // Required for target - miscellaneous.
+  void AssembleLIR() OVERRIDE;
+  void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+  void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+                                ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
+  const char* GetTargetInstFmt(int opcode) OVERRIDE;
+  const char* GetTargetInstName(int opcode) OVERRIDE;
+  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) OVERRIDE;
+  ResourceMask GetPCUseDefEncoding() const OVERRIDE;
+  uint64_t GetTargetInstFlags(int opcode) OVERRIDE;
+  size_t GetInsnSize(LIR* lir) OVERRIDE;
+  bool IsUnconditionalBranch(LIR* lir) OVERRIDE;
 
-    // Get the register class for load/store of a field.
-    RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
+  // Get the register class for load/store of a field.
+  RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
 
-    // Required for target - Dalvik-level generators.
-    void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                        RegLocation lr_shift);
-    void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                           RegLocation rl_src1, RegLocation rl_src2);
-    void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
-                     RegLocation rl_index, RegLocation rl_dest, int scale);
-    void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
-                     RegLocation rl_src, int scale, bool card_mark);
-    void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                           RegLocation rl_src1, RegLocation rl_shift);
-    void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-    void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                          RegLocation rl_src2);
-    void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                         RegLocation rl_src2);
-    void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2);
-    void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
-    bool GenInlinedReverseBits(CallInfo* info, OpSize size);
-    bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
-    bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
-    bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
-    bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
-    bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double);
-    bool GenInlinedSqrt(CallInfo* info);
-    bool GenInlinedPeek(CallInfo* info, OpSize size);
-    bool GenInlinedPoke(CallInfo* info, OpSize size);
-    bool GenInlinedAbsLong(CallInfo* info);
-    void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                   RegLocation rl_src2);
-    void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                       RegLocation rl_src2, bool is_div);
-    RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
-    RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
-    void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-    void GenDivZeroCheckWide(RegStorage reg);
-    void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
-    void GenExitSequence();
-    void GenSpecialExitSequence();
-    void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
-    void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
-    void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
-    void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
-    void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
-                          int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                          int dest_reg_class) OVERRIDE;
-    // Helper used in the above two.
-    void GenSelect(int32_t left, int32_t right, ConditionCode code, RegStorage rs_dest,
-                   int result_reg_class);
+  // Required for target - Dalvik-level generators.
+  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                      RegLocation lr_shift) OVERRIDE;
+  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                         RegLocation rl_src2) OVERRIDE;
+  void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+                   RegLocation rl_dest, int scale) OVERRIDE;
+  void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+                   RegLocation rl_src, int scale, bool card_mark) OVERRIDE;
+  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                         RegLocation rl_shift) OVERRIDE;
+  void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                        RegLocation rl_src2) OVERRIDE;
+  void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                       RegLocation rl_src2) OVERRIDE;
+  void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                RegLocation rl_src2) OVERRIDE;
+  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  bool GenInlinedReverseBits(CallInfo* info, OpSize size) OVERRIDE;
+  bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
+  bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
+  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) OVERRIDE;
+  bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) OVERRIDE;
+  bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) OVERRIDE;
+  bool GenInlinedSqrt(CallInfo* info) OVERRIDE;
+  bool GenInlinedCeil(CallInfo* info) OVERRIDE;
+  bool GenInlinedFloor(CallInfo* info) OVERRIDE;
+  bool GenInlinedRint(CallInfo* info) OVERRIDE;
+  bool GenInlinedRound(CallInfo* info, bool is_double) OVERRIDE;
+  bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
+  bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
+  bool GenInlinedAbsLong(CallInfo* info) OVERRIDE;
+  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                      RegLocation rl_src2) OVERRIDE;
+  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div)
+      OVERRIDE;
+  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div)
+      OVERRIDE;
+  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) OVERRIDE;
+  void GenDivZeroCheckWide(RegStorage reg) OVERRIDE;
+  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
+  void GenExitSequence() OVERRIDE;
+  void GenSpecialExitSequence() OVERRIDE;
+  void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
+  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
+  void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
+  void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+                        int32_t true_val, int32_t false_val, RegStorage rs_dest,
+                        int dest_reg_class) OVERRIDE;
 
-    bool GenMemBarrier(MemBarrierKind barrier_kind);
-    void GenMonitorEnter(int opt_flags, RegLocation rl_src);
-    void GenMonitorExit(int opt_flags, RegLocation rl_src);
-    void GenMoveException(RegLocation rl_dest);
-    void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
-                                       int first_bit, int second_bit);
-    void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
-    void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
-    void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
-    void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+  bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
+  void GenMonitorEnter(int opt_flags, RegLocation rl_src) OVERRIDE;
+  void GenMonitorExit(int opt_flags, RegLocation rl_src) OVERRIDE;
+  void GenMoveException(RegLocation rl_dest) OVERRIDE;
+  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+                                     int first_bit, int second_bit) OVERRIDE;
+  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+  void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
 
-    uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2);
-    void UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask);
-    void SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask);
-    void UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask);
-    void SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask);
+  // Required for target - single operation generators.
+  LIR* OpUnconditionalBranch(LIR* target) OVERRIDE;
+  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) OVERRIDE;
+  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) OVERRIDE;
+  LIR* OpCondBranch(ConditionCode cc, LIR* target) OVERRIDE;
+  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) OVERRIDE;
+  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpIT(ConditionCode cond, const char* guide) OVERRIDE;
+  void OpEndIT(LIR* it) OVERRIDE;
+  LIR* OpMem(OpKind op, RegStorage r_base, int disp) OVERRIDE;
+  LIR* OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
+  LIR* OpReg(OpKind op, RegStorage r_dest_src) OVERRIDE;
+  void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) OVERRIDE;
+  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) OVERRIDE;
+  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) OVERRIDE;
+  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) OVERRIDE;
+  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) OVERRIDE;
+  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) OVERRIDE;
+  LIR* OpTestSuspend(LIR* target) OVERRIDE;
+  LIR* OpVldm(RegStorage r_base, int count) OVERRIDE;
+  LIR* OpVstm(RegStorage r_base, int count) OVERRIDE;
+  void OpRegCopyWide(RegStorage dest, RegStorage src) OVERRIDE;
 
-    // Required for target - single operation generators.
-    LIR* OpUnconditionalBranch(LIR* target);
-    LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
-    LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
-    LIR* OpCondBranch(ConditionCode cc, LIR* target);
-    LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
-    LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
-    LIR* OpIT(ConditionCode cond, const char* guide);
-    void OpEndIT(LIR* it);
-    LIR* OpMem(OpKind op, RegStorage r_base, int disp);
-    LIR* OpPcRelLoad(RegStorage reg, LIR* target);
-    LIR* OpReg(OpKind op, RegStorage r_dest_src);
-    void OpRegCopy(RegStorage r_dest, RegStorage r_src);
-    LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
-    LIR* OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value);
-    LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
-    LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
-    LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
-    LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
-    LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
-    LIR* OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value);
-    LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
-    LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
-    LIR* OpTestSuspend(LIR* target);
-    LIR* OpVldm(RegStorage r_base, int count);
-    LIR* OpVstm(RegStorage r_base, int count);
-    void OpRegCopyWide(RegStorage dest, RegStorage src);
+  bool InexpensiveConstantInt(int32_t value) OVERRIDE;
+  bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) OVERRIDE;
+  bool InexpensiveConstantFloat(int32_t value) OVERRIDE;
+  bool InexpensiveConstantLong(int64_t value) OVERRIDE;
+  bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
 
-    LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
-    LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
-    LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
-                          int shift);
-    LIR* OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
-                           A64RegExtEncodings ext, uint8_t amount);
-    LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
-    LIR* OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
-    static const ArmEncodingMap EncodingMap[kA64Last];
-    int EncodeShift(int code, int amount);
-    int EncodeExtend(int extend_type, int amount);
-    bool IsExtendEncoding(int encoded_value);
-    int EncodeLogicalImmediate(bool is_wide, uint64_t value);
-    uint64_t DecodeLogicalImmediate(bool is_wide, int value);
+  void FlushIns(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
 
-    ArmConditionCode ArmConditionEncoding(ConditionCode code);
-    bool InexpensiveConstantInt(int32_t value);
-    bool InexpensiveConstantFloat(int32_t value);
-    bool InexpensiveConstantLong(int64_t value);
-    bool InexpensiveConstantDouble(int64_t value);
-
-    void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
-
-    int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
-                             NextCallInsn next_call_insn,
-                             const MethodReference& target_method,
-                             uint32_t vtable_idx,
-                             uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
-                             bool skip_this);
-
-    int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
+  int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
                            NextCallInsn next_call_insn,
                            const MethodReference& target_method,
                            uint32_t vtable_idx,
                            uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
-                           bool skip_this);
-    InToRegStorageMapping in_to_reg_storage_mapping_;
+                           bool skip_this) OVERRIDE;
 
-    bool WideGPRsAreAliases() OVERRIDE {
-      return true;  // 64b architecture.
-    }
-    bool WideFPRsAreAliases() OVERRIDE {
-      return true;  // 64b architecture.
-    }
+  int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
+                         NextCallInsn next_call_insn,
+                         const MethodReference& target_method,
+                         uint32_t vtable_idx,
+                         uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+                         bool skip_this) OVERRIDE;
 
-    LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+  bool WideGPRsAreAliases() OVERRIDE {
+    return true;  // 64b architecture.
+  }
+  bool WideFPRsAreAliases() OVERRIDE {
+    return true;  // 64b architecture.
+  }
 
-  private:
-    /**
-     * @brief Given register xNN (dNN), returns register wNN (sNN).
-     * @param reg #RegStorage containing a Solo64 input register (e.g. @c x1 or @c d2).
-     * @return A Solo32 with the same register number as the @p reg (e.g. @c w1 or @c s2).
-     * @see As64BitReg
-     */
-    RegStorage As32BitReg(RegStorage reg) {
-      DCHECK(!reg.IsPair());
-      if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
-        if (kFailOnSizeError) {
-          LOG(FATAL) << "Expected 64b register";
-        } else {
-          LOG(WARNING) << "Expected 64b register";
-          return reg;
-        }
+  size_t GetInstructionOffset(LIR* lir) OVERRIDE;
+
+  LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+
+ private:
+  /**
+   * @brief Given register xNN (dNN), returns register wNN (sNN).
+   * @param reg #RegStorage containing a Solo64 input register (e.g. @c x1 or @c d2).
+   * @return A Solo32 with the same register number as the @p reg (e.g. @c w1 or @c s2).
+   * @see As64BitReg
+   */
+  RegStorage As32BitReg(RegStorage reg) {
+    DCHECK(!reg.IsPair());
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Expected 64b register";
+      } else {
+        LOG(WARNING) << "Expected 64b register";
+        return reg;
       }
-      RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
-                                      reg.GetRawBits() & RegStorage::kRegTypeMask);
-      DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
-                               ->GetReg().GetReg(),
-                ret_val.GetReg());
-      return ret_val;
     }
+    RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
+                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
+    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
+              ->GetReg().GetReg(),
+              ret_val.GetReg());
+    return ret_val;
+  }
 
-    RegStorage Check32BitReg(RegStorage reg) {
-      if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
-        if (kFailOnSizeError) {
-          LOG(FATAL) << "Checked for 32b register";
-        } else {
-          LOG(WARNING) << "Checked for 32b register";
-          return As32BitReg(reg);
-        }
+  RegStorage Check32BitReg(RegStorage reg) {
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Checked for 32b register";
+      } else {
+        LOG(WARNING) << "Checked for 32b register";
+        return As32BitReg(reg);
       }
-      return reg;
     }
+    return reg;
+  }
 
-    /**
-     * @brief Given register wNN (sNN), returns register xNN (dNN).
-     * @param reg #RegStorage containing a Solo32 input register (e.g. @c w1 or @c s2).
-     * @return A Solo64 with the same register number as the @p reg (e.g. @c x1 or @c d2).
-     * @see As32BitReg
-     */
-    RegStorage As64BitReg(RegStorage reg) {
-      DCHECK(!reg.IsPair());
-      if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
-        if (kFailOnSizeError) {
-          LOG(FATAL) << "Expected 32b register";
-        } else {
-          LOG(WARNING) << "Expected 32b register";
-          return reg;
-        }
+  /**
+   * @brief Given register wNN (sNN), returns register xNN (dNN).
+   * @param reg #RegStorage containing a Solo32 input register (e.g. @c w1 or @c s2).
+   * @return A Solo64 with the same register number as the @p reg (e.g. @c x1 or @c d2).
+   * @see As32BitReg
+   */
+  RegStorage As64BitReg(RegStorage reg) {
+    DCHECK(!reg.IsPair());
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Expected 32b register";
+      } else {
+        LOG(WARNING) << "Expected 32b register";
+        return reg;
       }
-      RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
-                                      reg.GetRawBits() & RegStorage::kRegTypeMask);
-      DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
-                               ->GetReg().GetReg(),
-                ret_val.GetReg());
-      return ret_val;
     }
+    RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
+                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
+    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
+              ->GetReg().GetReg(),
+              ret_val.GetReg());
+    return ret_val;
+  }
 
-    RegStorage Check64BitReg(RegStorage reg) {
-      if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
-        if (kFailOnSizeError) {
-          LOG(FATAL) << "Checked for 64b register";
-        } else {
-          LOG(WARNING) << "Checked for 64b register";
-          return As64BitReg(reg);
-        }
+  RegStorage Check64BitReg(RegStorage reg) {
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Checked for 64b register";
+      } else {
+        LOG(WARNING) << "Checked for 64b register";
+        return As64BitReg(reg);
       }
-      return reg;
     }
+    return reg;
+  }
 
-    LIR* LoadFPConstantValue(RegStorage r_dest, int32_t value);
-    LIR* LoadFPConstantValueWide(RegStorage r_dest, int64_t value);
-    void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
-    void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
-    void AssignDataOffsets();
-    RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                          bool is_div, bool check_zero);
-    RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
+  int32_t EncodeImmSingle(uint32_t bits);
+  int32_t EncodeImmDouble(uint64_t bits);
+  LIR* LoadFPConstantValue(RegStorage r_dest, int32_t value);
+  LIR* LoadFPConstantValueWide(RegStorage r_dest, int64_t value);
+  void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
+  void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
+  void AssignDataOffsets();
+  RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+                        bool is_div, bool check_zero);
+  RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
+  size_t GetLoadStoreSize(LIR* lir);
+
+  bool SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
+                            RegLocation rl_dest, int64_t lit);
+
+  uint32_t LinkFixupInsns(LIR* head_lir, LIR* tail_lir, CodeOffset offset);
+  int AssignInsnOffsets();
+  void AssignOffsets();
+  uint8_t* EncodeLIRs(uint8_t* write_pos, LIR* lir);
+
+  // Spill core and FP registers. Returns the SP difference: either spill size, or whole
+  // frame size.
+  int SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
+
+  // Unspill core and FP registers.
+  void UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
+
+  void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+
+  LIR* OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value);
+  LIR* OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value);
+
+  LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
+  LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
+                        int shift);
+  int EncodeShift(int code, int amount);
+
+  LIR* OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
+                      A64RegExtEncodings ext, uint8_t amount);
+  LIR* OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
+                         A64RegExtEncodings ext, uint8_t amount);
+  int EncodeExtend(int extend_type, int amount);
+  bool IsExtendEncoding(int encoded_value);
+
+  LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
+  LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+
+  int EncodeLogicalImmediate(bool is_wide, uint64_t value);
+  uint64_t DecodeLogicalImmediate(bool is_wide, int value);
+  ArmConditionCode ArmConditionEncoding(ConditionCode code);
+
+  // Helper used in the two GenSelect variants.
+  void GenSelect(int32_t left, int32_t right, ConditionCode code, RegStorage rs_dest,
+                 int result_reg_class);
+
+  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
+  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+  void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                     RegLocation rl_src2, bool is_div);
+
+  InToRegStorageMapping in_to_reg_storage_mapping_;
+  static const ArmEncodingMap EncodingMap[kA64Last];
 };
 
 }  // namespace art
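
Most of the churn in this header is re-indentation to the one-space access-specifier style plus spelling OVERRIDE on every virtual redefinition. OVERRIDE is ART's macro for the C++11 `override` specifier; the payoff is that signature drift becomes a compile error instead of a silent overload, as this toy (non-ART) example shows:

```cpp
// Toy illustration, assuming OVERRIDE expands to the C++11 override specifier.
struct Mir2LirLike {
  virtual bool EasyMultiply(int lit) { return lit == 0; }
  virtual ~Mir2LirLike() {}
};

struct Arm64Like : Mir2LirLike {
  // bool EasyMultiply(long lit) override;  // would not compile: no base
  //                                        // method with this signature
  bool EasyMultiply(int lit) override { return lit == 1; }  // checked match
};

int main() { Arm64Like a; return a.EasyMultiply(1) ? 0 : 1; }
```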
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index ed13c04..d0b2636 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -17,6 +17,7 @@
 #include "arm64_lir.h"
 #include "codegen_arm64.h"
 #include "dex/quick/mir_to_lir-inl.h"
+#include "utils.h"
 
 namespace art {
 
@@ -386,6 +387,52 @@
   return true;
 }
 
+bool Arm64Mir2Lir::GenInlinedCeil(CallInfo* info) {
+  RegLocation rl_src = info->args[0];
+  RegLocation rl_dest = InlineTargetWide(info);
+  rl_src = LoadValueWide(rl_src, kFPReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(FWIDE(kA64Frintp2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  StoreValueWide(rl_dest, rl_result);
+  return true;
+}
+
+bool Arm64Mir2Lir::GenInlinedFloor(CallInfo* info) {
+  RegLocation rl_src = info->args[0];
+  RegLocation rl_dest = InlineTargetWide(info);
+  rl_src = LoadValueWide(rl_src, kFPReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(FWIDE(kA64Frintm2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  StoreValueWide(rl_dest, rl_result);
+  return true;
+}
+
+bool Arm64Mir2Lir::GenInlinedRint(CallInfo* info) {
+  RegLocation rl_src = info->args[0];
+  RegLocation rl_dest = InlineTargetWide(info);
+  rl_src = LoadValueWide(rl_src, kFPReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(FWIDE(kA64Frintn2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  StoreValueWide(rl_dest, rl_result);
+  return true;
+}
+
+bool Arm64Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+  int32_t encoded_imm = EncodeImmSingle(bit_cast<float, uint32_t>(0.5f));
+  ArmOpcode wide = (is_double) ? FWIDE(0) : FUNWIDE(0);
+  RegLocation rl_src = info->args[0];
+  RegLocation rl_dest = (is_double) ? InlineTargetWide(info) : InlineTarget(info);
+  rl_src = (is_double) ? LoadValueWide(rl_src, kFPReg) : LoadValue(rl_src, kFPReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  RegStorage r_tmp = (is_double) ? AllocTempDouble() : AllocTempSingle();
+  // 0.5f and 0.5d are encoded in the same way.
+  NewLIR2(kA64Fmov2fI | wide, r_tmp.GetReg(), encoded_imm);
+  NewLIR3(kA64Fadd3fff | wide, rl_src.reg.GetReg(), rl_src.reg.GetReg(), r_tmp.GetReg());
+  NewLIR2((is_double) ? kA64Fcvtms2xS : kA64Fcvtms2ws, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  (is_double) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
+  return true;
+}
+
 bool Arm64Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
   DCHECK_EQ(cu_->instruction_set, kArm64);
   int op = (is_min) ? kA64Fmin3fff : kA64Fmax3fff;
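
The round intrinsic above computes Math.round as "add 0.5, then fcvtms" (convert to integer, rounding toward minus infinity). A standalone sketch of the value semantics, ignoring the NaN/overflow saturation the real instruction provides:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// fcvtms(x + 0.5) == floor(x + 0.5): halfway cases round toward +infinity,
// matching Java's Math.round.
static int64_t RoundViaFcvtms(double x) {
  return static_cast<int64_t>(std::floor(x + 0.5));
}

int main() {
  std::printf("%lld\n", static_cast<long long>(RoundViaFcvtms(2.5)));   // 3
  std::printf("%lld\n", static_cast<long long>(RoundViaFcvtms(-2.5)));  // -2
  return 0;
}
```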
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index f9f85f4..147fee8 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -22,6 +22,7 @@
 #include "dex/reg_storage_eq.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "mirror/array.h"
+#include "utils.h"
 
 namespace art {
 
@@ -788,6 +789,7 @@
 }
 
 LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
   return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
 }
 
@@ -929,34 +931,52 @@
   StoreValueWide(rl_dest, rl_result);
 }
 
-void Arm64Mir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
-                              RegLocation rl_src1, RegLocation rl_src2) {
-  GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                              RegLocation rl_src2) {
-  GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2) {
-  GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
+void Arm64Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                  RegLocation rl_src1, RegLocation rl_src2) {
+  switch (opcode) {
+    case Instruction::NOT_LONG:
+      GenNotLong(rl_dest, rl_src2);
+      return;
+    case Instruction::ADD_LONG:
+    case Instruction::ADD_LONG_2ADDR:
+      GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::SUB_LONG:
+    case Instruction::SUB_LONG_2ADDR:
+      GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::MUL_LONG:
+    case Instruction::MUL_LONG_2ADDR:
+      GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::DIV_LONG:
+    case Instruction::DIV_LONG_2ADDR:
+      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
+      return;
+    case Instruction::REM_LONG:
+    case Instruction::REM_LONG_2ADDR:
+      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
+      return;
+    case Instruction::AND_LONG_2ADDR:
+    case Instruction::AND_LONG:
+      GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::OR_LONG:
+    case Instruction::OR_LONG_2ADDR:
+      GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::XOR_LONG:
+    case Instruction::XOR_LONG_2ADDR:
+      GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::NEG_LONG: {
+      GenNegLong(rl_dest, rl_src2);
+      return;
+    }
+    default:
+      LOG(FATAL) << "Invalid long arith op";
+      return;
+  }
 }
 
 /*
@@ -1190,22 +1210,7 @@
 
 void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                      RegLocation rl_src1, RegLocation rl_src2) {
-  if ((opcode == Instruction::SUB_LONG) || (opcode == Instruction::SUB_LONG_2ADDR)) {
-    if (!rl_src2.is_const) {
-      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
-    }
-  } else {
-    // Associativity.
-    if (!rl_src2.is_const) {
-      DCHECK(rl_src1.is_const);
-      std::swap(rl_src1, rl_src2);
-    }
-  }
-  DCHECK(rl_src2.is_const);
-
   OpKind op = kOpBkpt;
-  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-
   switch (opcode) {
     case Instruction::ADD_LONG:
     case Instruction::ADD_LONG_2ADDR:
@@ -1231,12 +1236,34 @@
       LOG(FATAL) << "Unexpected opcode";
   }
 
+  if (op == kOpSub) {
+    if (!rl_src2.is_const) {
+      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+    }
+  } else {
+    // Associativity.
+    if (!rl_src2.is_const) {
+      DCHECK(rl_src1.is_const);
+      std::swap(rl_src1, rl_src2);
+    }
+  }
+  DCHECK(rl_src2.is_const);
+  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
+
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
   OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
   StoreValueWide(rl_dest, rl_result);
 }
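
Hoisting the opcode switch above the constant checks lets the SUB special case key off `op == kOpSub` rather than re-matching Dalvik opcodes. The asymmetry it guards is plain commutativity; a toy check, nothing ART-specific:

```cpp
#include <cassert>

// Swapping a constant into rl_src2 is only legal for commutative ops:
// x + c == c + x, but x - c != c - x, so kOpSub falls back to the
// general register path when the constant sits in rl_src1.
int main() {
  int x = 7, c = 3;
  assert(x + c == c + x);  // safe to swap
  assert(x - c != c - x);  // not safe: 4 vs -4
  return 0;
}
```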
 
+static uint32_t ExtractReg(uint32_t reg_mask, int* reg) {
+  // Find first register.
+  int first_bit_set = CTZ(reg_mask) + 1;
+  *reg = *reg + first_bit_set;
+  reg_mask >>= first_bit_set;
+  return reg_mask;
+}
+
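+
Aside: ExtractReg peels one register number off the mask per call, advancing `*reg` by the distance to the lowest set bit. A standalone copy with a worked walk:

```cpp
#include <cassert>

// Re-implementation of ExtractReg for illustration; CTZ is the
// count-trailing-zeros helper from utils.h (here the GCC builtin).
static unsigned ExtractReg(unsigned reg_mask, int* reg) {
  int first_bit_set = __builtin_ctz(reg_mask) + 1;
  *reg = *reg + first_bit_set;
  return reg_mask >> first_bit_set;
}

int main() {
  unsigned mask = 0xA4;  // binary 10100100: registers 2, 5 and 7
  int reg = -1;
  mask = ExtractReg(mask, &reg);  assert(reg == 2);
  mask = ExtractReg(mask, &reg);  assert(reg == 5);
  mask = ExtractReg(mask, &reg);  assert(reg == 7);
  assert(mask == 0);
  return 0;
}
```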
 /**
  * @brief Split a register list in pairs or registers.
  *
@@ -1253,15 +1280,15 @@
  *   }
  * @endcode
  */
-uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
+static uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
   // Find first register.
-  int first_bit_set = __builtin_ctz(reg_mask) + 1;
+  int first_bit_set = CTZ(reg_mask) + 1;
   int reg = *reg1 + first_bit_set;
   reg_mask >>= first_bit_set;
 
   if (LIKELY(reg_mask)) {
     // Save the first register, find the second and use the pair opcode.
-    int second_bit_set = __builtin_ctz(reg_mask) + 1;
+    int second_bit_set = CTZ(reg_mask) + 1;
     *reg2 = reg;
     reg_mask >>= second_bit_set;
     *reg1 = reg + second_bit_set;
@@ -1274,68 +1301,274 @@
   return reg_mask;
 }
 
-void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
-  int reg1 = -1, reg2 = -1;
-  const int reg_log2_size = 3;
-
-  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
-     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
-    if (UNLIKELY(reg2 < 0)) {
-      NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
-    } else {
-      DCHECK_LE(offset, 63);
-      NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
-              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
-    }
-  }
-}
-
-void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
+static void SpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
   int reg1 = -1, reg2 = -1;
   const int reg_log2_size = 3;
 
   for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
     if (UNLIKELY(reg2 < 0)) {
-      NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+      m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
     } else {
-      NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
-              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
-    }
-  }
-}
-
-void Arm64Mir2Lir::UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
-  int reg1 = -1, reg2 = -1;
-  const int reg_log2_size = 3;
-
-  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
-     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
-    if (UNLIKELY(reg2 < 0)) {
-      NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
-    } else {
-      NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
-              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+      m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
     }
   }
 }
 
 // TODO(Arm64): consider using ld1 and st1?
-void Arm64Mir2Lir::SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
+static void SpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
   int reg1 = -1, reg2 = -1;
   const int reg_log2_size = 3;
 
   for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
     if (UNLIKELY(reg2 < 0)) {
-      NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+      m2l->NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+                   offset);
     } else {
-      NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
-              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+      m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+                   RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
     }
   }
 }
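
A note on the `offset >> reg_log2_size` / `offset += 2` pattern in the spill loops above: A64 stp/ldp of 64-bit registers encode their displacement as a signed 7-bit immediate in units of the register size, so the loops count 8-byte slots rather than bytes, and each pair advances the immediate by 2 (16 bytes). A sketch with hypothetical numbers:

```cpp
#include <cstdio>

int main() {
  const int reg_log2_size = 3;             // log2(8): 64-bit registers
  int byte_offset = 32;                    // where the save area starts
  int imm = byte_offset >> reg_log2_size;  // 4 slots
  for (int pair = 0; pair < 3; ++pair, imm += 2) {
    // Encoded immediate 'imm' addresses byte offset imm * 8.
    std::printf("stp x?, x?, [sp, #%d]\n", imm * 8);  // 32, 48, 64
  }
  return 0;
}
```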
 
+static int SpillRegsPreSub(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
+                           uint32_t fp_reg_mask, int frame_size) {
+  m2l->OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size);
+
+  int core_count = POPCOUNT(core_reg_mask);
+
+  if (fp_reg_mask != 0) {
+    // Spill FP regs.
+    int fp_count = POPCOUNT(fp_reg_mask);
+    int spill_offset = frame_size - (core_count + fp_count) * kArm64PointerSize;
+    SpillFPRegs(m2l, rs_sp, spill_offset, fp_reg_mask);
+  }
+
+  if (core_reg_mask != 0) {
+    // Spill core regs.
+    int spill_offset = frame_size - (core_count * kArm64PointerSize);
+    SpillCoreRegs(m2l, rs_sp, spill_offset, core_reg_mask);
+  }
+
+  return frame_size;
+}
+
+static int SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
+                               uint32_t fp_reg_mask, int frame_size) {
+  // Otherwise, spill both core and fp regs at the same time.
+  // The very first instruction will be an stp with pre-indexed address, moving the stack pointer
+  // down. From then on, we fill upwards. This will generate overall the same number of instructions
+  // as the specialized code above in most cases (exception being odd number of core and even
+  // non-zero fp spills), but is more flexible, as the offsets are guaranteed small.
+  //
+  // Some demonstrative fill cases: (c) = core, (f) = fp
+  // cc    44   cc    44   cc    22   cc    33   fc => 1[1/2]
+  // fc => 23   fc => 23   ff => 11   ff => 22
+  // ff    11    f    11               f    11
+  //
+  int reg1 = -1, reg2 = -1;
+  int core_count = POPCOUNT(core_reg_mask);
+  int fp_count = POPCOUNT(fp_reg_mask);
+
+  int combined = fp_count + core_count;
+  int all_offset = RoundUp(combined, 2);  // Needs to be 16B = 2-reg aligned.
+
+  int cur_offset = 2;  // What's the starting offset after the first stp? We expect the base slot
+                       // to be filled.
+
+  // First figure out whether the bottom is FP or core.
+  if (fp_count > 0) {
+    // Some FP spills.
+    //
+    // Four cases: (d0 is dummy to fill up stp)
+    // 1) Single FP, even number of core -> stp d0, fp_reg
+    // 2) Single FP, odd number of core -> stp fp_reg, d0
+    // 3) More FP, even number combined -> stp fp_reg1, fp_reg2
+    // 4) More FP, odd number combined -> stp d0, fp_reg
+    if (fp_count == 1) {
+      fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
+      DCHECK_EQ(fp_reg_mask, 0U);
+      if (core_count % 2 == 0) {
+        m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
+                     RegStorage::FloatSolo64(reg1).GetReg(),
+                     RegStorage::FloatSolo64(reg1).GetReg(),
+                     base.GetReg(), -all_offset);
+      } else {
+        m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
+                     RegStorage::FloatSolo64(reg1).GetReg(),
+                     RegStorage::FloatSolo64(reg1).GetReg(),
+                     base.GetReg(), -all_offset);
+        cur_offset = 0;  // That core reg needs to go into the upper half.
+      }
+    } else {
+      if (combined % 2 == 0) {
+        fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
+        m2l->NewLIR4(WIDE(kA64StpPre4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+                     RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), -all_offset);
+      } else {
+        fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
+        m2l->NewLIR4(WIDE(kA64StpPre4ffXD), rs_d0.GetReg(), RegStorage::FloatSolo64(reg1).GetReg(),
+                     base.GetReg(), -all_offset);
+      }
+    }
+  } else {
+    // No FP spills.
+    //
+    // Two cases:
+    // 1) Even number of core -> stp core1, core2
+    // 2) Odd number of core -> stp xzr, core1
+    if (core_count % 2 == 1) {
+      core_reg_mask = ExtractReg(core_reg_mask, &reg1);
+      m2l->NewLIR4(WIDE(kA64StpPre4rrXD), rs_xzr.GetReg(),
+                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
+    } else {
+      core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
+      m2l->NewLIR4(WIDE(kA64StpPre4rrXD), RegStorage::Solo64(reg2).GetReg(),
+                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
+    }
+  }
+
+  if (fp_count != 0) {
+    for (; fp_reg_mask != 0;) {
+      // Have some FP regs to do.
+      fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
+      if (UNLIKELY(reg2 < 0)) {
+        m2l->NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+                     cur_offset);
+        // Do not increment offset here, as the second half will be filled by a core reg.
+      } else {
+        m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+                     RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), cur_offset);
+        cur_offset += 2;
+      }
+    }
+
+    // Reset counting.
+    reg1 = -1;
+
+    // If there is an odd number of core registers, we need to store the bottom now.
+    if (core_count % 2 == 1) {
+      core_reg_mask = ExtractReg(core_reg_mask, &reg1);
+      m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(),
+                   cur_offset + 1);
+      cur_offset += 2;  // Half-slot filled now.
+    }
+  }
+
+  // Spill the rest of the core regs. They are guaranteed to be even.
+  DCHECK_EQ(POPCOUNT(core_reg_mask) % 2, 0);
+  for (; core_reg_mask != 0; cur_offset += 2) {
+    core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
+    m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+                 RegStorage::Solo64(reg1).GetReg(), base.GetReg(), cur_offset);
+  }
+
+  DCHECK_EQ(cur_offset, all_offset);
+
+  return all_offset * 8;
+}
+
+int Arm64Mir2Lir::SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+                            int frame_size) {
+  // If the frame size is small enough that all offsets would fit into the immediates, use that
+  // setup, as it decrements sp early (kind of instruction scheduling), and is not worse
+  // instruction-count wise than the complicated code below.
+  //
+  // This case is also optimal when we have an odd number of core spills, and an even (non-zero)
+  // number of fp spills.
+  if (RoundUp(frame_size, 8) / 8 <= 63) {
+    return SpillRegsPreSub(this, base, core_reg_mask, fp_reg_mask, frame_size);
+  } else {
+    return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask, frame_size);
+  }
+}
+
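+
The dispatch above encodes the ldp/stp reach: 63 is the largest positive value of the scaled 7-bit immediate, so frames up to 63 * 8 = 504 bytes can be addressed directly after a single up-front `sub sp`. A quick table of the decision, assuming the same RoundUp semantics as utils.h:

```cpp
#include <cstdio>
#include <initializer_list>

int main() {
  for (int frame_size : {256, 504, 512, 4096}) {
    // Same test as above: RoundUp(frame_size, 8) / 8 <= 63.
    bool pre_sub = ((frame_size + 7) / 8) <= 63;
    std::printf("frame %4d -> %s\n", frame_size,
                pre_sub ? "SpillRegsPreSub" : "SpillRegsPreIndexed");
  }
  return 0;
}
```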
+static void UnSpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
+  int reg1 = -1, reg2 = -1;
+  const int reg_log2_size = 3;
+
+  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
+    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
+    if (UNLIKELY(reg2 < 0)) {
+      m2l->NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+    } else {
+      DCHECK_LE(offset, 63);
+      m2l->NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+    }
+  }
+}
+
+static void UnSpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
+  int reg1 = -1, reg2 = -1;
+  const int reg_log2_size = 3;
+
+  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
+    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
+    if (UNLIKELY(reg2 < 0)) {
+      m2l->NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+                   offset);
+    } else {
+      m2l->NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+                   RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+    }
+  }
+}
+
+void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+                               int frame_size) {
+  // Restore saves and drop stack frame.
+  // 2 versions:
+  //
+  // 1. (Original): Try to address directly, then drop the whole frame.
+  //                Limitation: ldp is a 7b signed immediate.
+  //
+  // 2. (New): Drop the non-save-part. Then do similar to original, which is now guaranteed to be
+  //           in range. Then drop the rest.
+  //
+  // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
+  //       in variant 1.
+
+  // "Magic" constant, 63 (max signed 7b) * 8.
+  static constexpr int kMaxFramesizeForOffset = 63 * kArm64PointerSize;
+
+  const int num_core_spills = POPCOUNT(core_reg_mask);
+  const int num_fp_spills = POPCOUNT(fp_reg_mask);
+
+  int early_drop = 0;
+
+  if (frame_size > kMaxFramesizeForOffset) {
+    // Second variant. Drop the frame part.
+
+    // TODO: Always use the first formula, as num_fp_spills would be zero?
+    if (fp_reg_mask != 0) {
+      early_drop = frame_size - kArm64PointerSize * (num_fp_spills + num_core_spills);
+    } else {
+      early_drop = frame_size - kArm64PointerSize * num_core_spills;
+    }
+
+    // Drop needs to be 16B aligned, so that SP keeps aligned.
+    early_drop = RoundDown(early_drop, 16);
+
+    OpRegImm64(kOpAdd, rs_sp, early_drop);
+  }
+
+  // Unspill.
+  if (fp_reg_mask != 0) {
+    int offset = frame_size - early_drop - kArm64PointerSize * (num_fp_spills + num_core_spills);
+    UnSpillFPRegs(this, rs_sp, offset, fp_reg_mask);
+  }
+  if (core_reg_mask != 0) {
+    int offset = frame_size - early_drop - kArm64PointerSize * num_core_spills;
+    UnSpillCoreRegs(this, rs_sp, offset, core_reg_mask);
+  }
+
+  // Drop the (rest of) the frame.
+  OpRegImm64(kOpAdd, rs_sp, frame_size - early_drop);
+}
+
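+
A worked instance of the two-step epilogue, with hypothetical numbers: frame_size = 1024 (too big for direct ldp offsets) and five core spills, no FP spills.

```cpp
#include <cstdio>

int main() {
  const int frame_size = 1024, num_core_spills = 5, ptr_size = 8;
  int early_drop = frame_size - ptr_size * num_core_spills;  // 984
  early_drop &= ~15;                                         // RoundDown(984, 16) == 976
  int offset = frame_size - early_drop - ptr_size * num_core_spills;  // 8
  // The saves now sit at offsets 8..40 from the adjusted SP, well inside
  // the 504-byte ldp window; the final add drops the remaining bytes.
  std::printf("add sp, sp, #%d ; ldp from #%d ; add sp, sp, #%d\n",
              early_drop, offset, frame_size - early_drop);  // 976, 8, 48
  return 0;
}
```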
 bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
   ArmOpcode wide = (size == k64) ? WIDE(0) : UNWIDE(0);
   RegLocation rl_src_i = info->args[0];
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index f6c140f..5326e74 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -23,7 +23,7 @@
 
 /* This file contains codegen for the A64 ISA. */
 
-static int32_t EncodeImmSingle(uint32_t bits) {
+int32_t Arm64Mir2Lir::EncodeImmSingle(uint32_t bits) {
   /*
    * Valid values will have the form:
    *
@@ -55,7 +55,7 @@
   return (bit7 | bit6 | bit5_to_0);
 }
 
-static int32_t EncodeImmDouble(uint64_t bits) {
+int32_t Arm64Mir2Lir::EncodeImmDouble(uint64_t bits) {
   /*
    * Valid values will have the form:
    *
@@ -87,6 +87,26 @@
   return (bit7 | bit6 | bit5_to_0);
 }
 
+size_t Arm64Mir2Lir::GetLoadStoreSize(LIR* lir) {
+  bool opcode_is_wide = IS_WIDE(lir->opcode);
+  ArmOpcode opcode = UNWIDE(lir->opcode);
+  DCHECK(!IsPseudoLirOp(opcode));
+  const ArmEncodingMap *encoder = &EncodingMap[opcode];
+  uint32_t bits = opcode_is_wide ? encoder->xskeleton : encoder->wskeleton;
+  return (bits >> 30);
+}
+
+size_t Arm64Mir2Lir::GetInstructionOffset(LIR* lir) {
+  size_t offset = lir->operands[2];
+  uint64_t check_flags = GetTargetInstFlags(lir->opcode);
+  DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
+  if (check_flags & SCALED_OFFSET_X0) {
+    DCHECK(check_flags & IS_TERTIARY_OP);
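+    // A64 unsigned-offset loads/stores encode the offset in units of the access size, so
+    // scale it back to bytes; e.g. operand 4 on a 64-bit ldr means byte offset 4 << 3 = 32.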
+    offset = offset * (1 << GetLoadStoreSize(lir));
+  }
+  return offset;
+}
+
 LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value) {
   DCHECK(r_dest.IsSingle());
   if (value == 0) {
@@ -249,8 +269,47 @@
   return (n << 12 | imm_r << 6 | imm_s);
 }
 
+// Maximum number of instructions to use for encoding the immediate.
+static const int max_num_ops_per_const_load = 2;
+
+/**
+ * @brief Return the number of fast halfwords in the given uint64_t integer.
+ * @details The input integer is split into 4 halfwords (bits 0-15, 16-31, 32-47, 48-63). The
+ *   number of fast halfwords (halfwords that are either 0 or 0xffff) is returned. See below for
+ *   a more accurate description.
+ * @param value The input 64-bit integer.
+ * @return A value @c retval such that (retval & 0x7) is the maximum of n and m, where n is
+ *   the number of halfwords with all bits unset (0) and m is the number of halfwords with all bits
+ *   set (0xffff). Additionally (retval & 0x8) is set when m > n.
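+ *   For example, GetNumFastHalfWords(UINT64_C(0x0000ffff00001234)) returns 2: two halfwords
+ *   are 0x0000 and only one is 0xffff, so n = 2, m = 1 and bit 3 stays clear.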
+ */
+static int GetNumFastHalfWords(uint64_t value) {
+  unsigned int num_0000_halfwords = 0;
+  unsigned int num_ffff_halfwords = 0;
+  for (int shift = 0; shift < 64; shift += 16) {
+    uint16_t halfword = static_cast<uint16_t>(value >> shift);
+    if (halfword == 0)
+      num_0000_halfwords++;
+    else if (halfword == UINT16_C(0xffff))
+      num_ffff_halfwords++;
+  }
+  if (num_0000_halfwords >= num_ffff_halfwords) {
+    DCHECK_LE(num_0000_halfwords, 4U);
+    return num_0000_halfwords;
+  } else {
+    DCHECK_LE(num_ffff_halfwords, 4U);
+    return num_ffff_halfwords | 0x8;
+  }
+}
+
+// The InexpensiveConstantXXX variants below are used in the promotion algorithm to determine how a
+// constant is considered for promotion. If the constant is "inexpensive" then the promotion
+// algorithm will give it a low priority for promotion, even when it is referenced many times in
+// the code.
+
 bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value) {
-  return false;  // (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
+  // A 32-bit int can always be loaded with 2 instructions (and without using the literal pool).
+  // We therefore return true and give it a low priority for promotion.
+  return true;
 }
 
 bool Arm64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
@@ -258,13 +317,70 @@
 }
 
 bool Arm64Mir2Lir::InexpensiveConstantLong(int64_t value) {
-  return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
+  int num_slow_halfwords = 4 - (GetNumFastHalfWords(value) & 0x7);
+  if (num_slow_halfwords <= max_num_ops_per_const_load) {
+    return true;
+  }
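+  // Otherwise the constant is inexpensive only if it is encodable as a logical immediate,
+  // e.g. 0x5555555555555555 (a repeating two-bit pattern).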
+  return (EncodeLogicalImmediate(/*is_wide=*/true, value) >= 0);
 }
 
 bool Arm64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
   return EncodeImmDouble(value) >= 0;
 }
 
+// The InexpensiveConstantXXX variants below are used to determine which A64 instruction to use
+// when one of the operands is an immediate (e.g. the register vs. the immediate form of add).
+
+bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
+  switch (opcode) {
+  case Instruction::IF_EQ:
+  case Instruction::IF_NE:
+  case Instruction::IF_LT:
+  case Instruction::IF_GE:
+  case Instruction::IF_GT:
+  case Instruction::IF_LE:
+  case Instruction::ADD_INT:
+  case Instruction::ADD_INT_2ADDR:
+  case Instruction::SUB_INT:
+  case Instruction::SUB_INT_2ADDR:
+    // The code below is consistent with the implementation of OpRegRegImm().
+    {
+      int32_t abs_value = std::abs(value);
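+      // A64 add/sub immediates are 12 bits wide, optionally shifted left by 12; e.g. 0xabc
+      // and 0xabc000 are encodable directly, while 0xabc123 is not.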
+      if (abs_value < 0x1000) {
+        return true;
+      } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
+        return true;
+      }
+      return false;
+    }
+  case Instruction::SHL_INT:
+  case Instruction::SHL_INT_2ADDR:
+  case Instruction::SHR_INT:
+  case Instruction::SHR_INT_2ADDR:
+  case Instruction::USHR_INT:
+  case Instruction::USHR_INT_2ADDR:
+    return true;
+  case Instruction::AND_INT:
+  case Instruction::AND_INT_2ADDR:
+  case Instruction::AND_INT_LIT16:
+  case Instruction::AND_INT_LIT8:
+  case Instruction::OR_INT:
+  case Instruction::OR_INT_2ADDR:
+  case Instruction::OR_INT_LIT16:
+  case Instruction::OR_INT_LIT8:
+  case Instruction::XOR_INT:
+  case Instruction::XOR_INT_2ADDR:
+  case Instruction::XOR_INT_LIT16:
+  case Instruction::XOR_INT_LIT8:
+    if (value == 0 || value == INT32_C(-1)) {
+      return true;
+    }
+    return (EncodeLogicalImmediate(/*is_wide=*/false, value) >= 0);
+  default:
+    return false;
+  }
+}
+
 /*
  * Load an immediate using a single instruction when possible; otherwise
  * use a pair of movz and movk instructions.
@@ -338,9 +454,6 @@
 
 // TODO: clean up the names. LoadConstantWide() should really be LoadConstantNoClobberWide().
 LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
-  // Maximum number of instructions to use for encoding the immediate.
-  const int max_num_ops = 2;
-
   if (r_dest.IsFloat()) {
     return LoadFPConstantValueWide(r_dest, value);
   }
@@ -358,19 +471,12 @@
   }
 
   // At least one of value's halfwords is neither 0x0 nor 0xffff: find out how many.
-  int num_0000_halfwords = 0;
-  int num_ffff_halfwords = 0;
   uint64_t uvalue = static_cast<uint64_t>(value);
-  for (int shift = 0; shift < 64; shift += 16) {
-    uint16_t halfword = static_cast<uint16_t>(uvalue >> shift);
-    if (halfword == 0)
-      num_0000_halfwords++;
-    else if (halfword == UINT16_C(0xffff))
-      num_ffff_halfwords++;
-  }
-  int num_fast_halfwords = std::max(num_0000_halfwords, num_ffff_halfwords);
+  int num_fast_halfwords = GetNumFastHalfWords(uvalue);
+  int num_slow_halfwords = 4 - (num_fast_halfwords & 0x7);
+  bool more_ffff_halfwords = (num_fast_halfwords & 0x8) != 0;
 
-  if (num_fast_halfwords < 3) {
+  if (num_slow_halfwords > 1) {
     // A single movz/movn is not enough. Try the logical immediate route.
     int log_imm = EncodeLogicalImmediate(/*is_wide=*/true, value);
     if (log_imm >= 0) {
@@ -378,19 +484,19 @@
     }
   }
 
-  if (num_fast_halfwords >= 4 - max_num_ops) {
+  if (num_slow_halfwords <= max_num_ops_per_const_load) {
     // We can encode the number using a movz/movn followed by one or more movk.
     ArmOpcode op;
     uint16_t background;
     LIR* res = nullptr;
 
     // Decide whether to use a movz or a movn.
-    if (num_0000_halfwords >= num_ffff_halfwords) {
-      op = WIDE(kA64Movz3rdM);
-      background = 0;
-    } else {
+    if (more_ffff_halfwords) {
       op = WIDE(kA64Movn3rdM);
       background = 0xffff;
+    } else {
+      op = WIDE(kA64Movz3rdM);
+      background = 0;
     }
 
     // Emit the first instruction (movz, movn).
@@ -525,7 +631,8 @@
   return NULL;
 }
 
-LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int extend) {
+LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
+                                  A64RegExtEncodings ext, uint8_t amount) {
   ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
   ArmOpcode opcode = kA64Brk1d;
 
@@ -536,6 +643,11 @@
     case kOpCmp:
       opcode = kA64Cmp3Rre;
       break;
+    case kOpAdd:
+      // Note: intentional fallthrough
+    case kOpSub:
+      return OpRegRegRegExtend(op, r_dest_src1, r_dest_src1, r_src2, ext, amount);
     default:
       LOG(FATAL) << "Bad Opcode: " << opcode;
       break;
@@ -545,7 +657,8 @@
   if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
     ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
     if (kind == kFmtExtend) {
-      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), extend);
+      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(),
+                     EncodeExtend(ext, amount));
     }
   }
 
@@ -555,10 +668,10 @@
 
 LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
   /* RegReg operations with SP in first parameter need extended register instruction form.
-   * Only CMN and CMP instructions are implemented.
+   * Only CMN, CMP, ADD & SUB instructions are implemented.
    */
   if (r_dest_src1 == rs_sp) {
-    return OpRegRegExtend(op, r_dest_src1, r_src2, ENCODE_NO_EXTEND);
+    return OpRegRegExtend(op, r_dest_src1, r_src2, kA64Uxtx, 0);
   } else {
     return OpRegRegShift(op, r_dest_src1, r_src2, ENCODE_NO_SHIFT);
   }
@@ -699,7 +812,7 @@
   int64_t abs_value = (neg) ? -value : value;
   ArmOpcode opcode = kA64Brk1d;
   ArmOpcode alt_opcode = kA64Brk1d;
-  int32_t log_imm = -1;
+  bool is_logical = false;
   bool is_wide = r_dest.Is64Bit();
   ArmOpcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);
   int info = 0;
@@ -734,65 +847,89 @@
         opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
         return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value >> 12, 1);
       } else {
-        log_imm = -1;
         alt_opcode = (op == kOpAdd) ? kA64Add4RRre : kA64Sub4RRre;
         info = EncodeExtend(is_wide ? kA64Uxtx : kA64Uxtw, 0);
       }
       break;
-    // case kOpRsub:
-    //   opcode = kThumb2RsubRRI8M;
-    //   alt_opcode = kThumb2RsubRRR;
-    //   break;
     case kOpAdc:
-      log_imm = -1;
       alt_opcode = kA64Adc3rrr;
       break;
     case kOpSbc:
-      log_imm = -1;
       alt_opcode = kA64Sbc3rrr;
       break;
     case kOpOr:
-      log_imm = EncodeLogicalImmediate(is_wide, value);
+      is_logical = true;
       opcode = kA64Orr3Rrl;
       alt_opcode = kA64Orr4rrro;
       break;
     case kOpAnd:
-      log_imm = EncodeLogicalImmediate(is_wide, value);
+      is_logical = true;
       opcode = kA64And3Rrl;
       alt_opcode = kA64And4rrro;
       break;
     case kOpXor:
-      log_imm = EncodeLogicalImmediate(is_wide, value);
+      is_logical = true;
       opcode = kA64Eor3Rrl;
       alt_opcode = kA64Eor4rrro;
       break;
     case kOpMul:
       // TUNING: power of 2, shift & add
-      log_imm = -1;
       alt_opcode = kA64Mul3rrr;
       break;
     default:
       LOG(FATAL) << "Bad opcode: " << op;
   }
 
-  if (log_imm >= 0) {
-    return NewLIR3(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), log_imm);
-  } else {
-    RegStorage r_scratch;
-    if (is_wide) {
-      r_scratch = AllocTempWide();
-      LoadConstantWide(r_scratch, value);
+  if (is_logical) {
+    int log_imm = EncodeLogicalImmediate(is_wide, value);
+    if (log_imm >= 0) {
+      return NewLIR3(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), log_imm);
     } else {
-      r_scratch = AllocTemp();
-      LoadConstant(r_scratch, value);
+      // When the immediate is either 0 or ~0, the logical operation can be trivially reduced
+      // to a (possibly negated) assignment.
+      if (value == 0) {
+        switch (op) {
+          case kOpOr:
+          case kOpXor:
+            // Or/Xor by zero reduces to an assignment.
+            return NewLIR2(kA64Mov2rr | wide, r_dest.GetReg(), r_src1.GetReg());
+          default:
+            // And by zero reduces to a `mov rdest, xzr'.
+            DCHECK(op == kOpAnd);
+            return NewLIR2(kA64Mov2rr | wide, r_dest.GetReg(), (is_wide) ? rxzr : rwzr);
+        }
+      } else if (value == INT64_C(-1)
+                 || (!is_wide && static_cast<uint32_t>(value) == ~UINT32_C(0))) {
+        switch (op) {
+          case kOpAnd:
+            // And by -1 reduces to an assignment.
+            return NewLIR2(kA64Mov2rr | wide, r_dest.GetReg(), r_src1.GetReg());
+          case kOpXor:
+            // Xor by -1 reduces to an `mvn rdest, rsrc'.
+            return NewLIR2(kA64Mvn2rr | wide, r_dest.GetReg(), r_src1.GetReg());
+          default:
+            // Or by -1 reduces to an `mvn rdest, xzr'.
+            DCHECK(op == kOpOr);
+            return NewLIR2(kA64Mvn2rr | wide, r_dest.GetReg(), (is_wide) ? rxzr : rwzr);
+        }
+      }
     }
-    if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
-      res = NewLIR4(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), info);
-    else
-      res = NewLIR3(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
-    FreeTemp(r_scratch);
-    return res;
   }
+
+  RegStorage r_scratch;
+  if (is_wide) {
+    r_scratch = AllocTempWide();
+    LoadConstantWide(r_scratch, value);
+  } else {
+    r_scratch = AllocTemp();
+    LoadConstant(r_scratch, value);
+  }
+  if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
+    res = NewLIR4(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), info);
+  else
+    res = NewLIR3(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
+  FreeTemp(r_scratch);
+  return res;
 }
 
 LIR* Arm64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
@@ -825,15 +962,6 @@
     }
     OpRegImm64(op, r_dest_src1, abs_value & (~INT64_C(0xfff)));
     return OpRegImm64(op, r_dest_src1, abs_value & 0xfff);
-  } else if (LIKELY(A64_REG_IS_SP(r_dest_src1.GetReg()) && (op == kOpAdd || op == kOpSub))) {
-    // Note: "sub sp, sp, Xm" is not correct on arm64.
-    // We need special instructions for SP.
-    // Also operation on 32-bit SP should be avoided.
-    DCHECK(IS_WIDE(wide));
-    RegStorage r_tmp = AllocTempWide();
-    OpRegRegImm(kOpAdd, r_tmp, r_dest_src1, 0);
-    OpRegImm64(op, r_tmp, value);
-    return OpRegRegImm(kOpAdd, r_dest_src1, r_tmp, 0);
   } else {
     RegStorage r_tmp;
     LIR* res;
@@ -878,10 +1006,14 @@
 }
 
 int Arm64Mir2Lir::EncodeShift(int shift_type, int amount) {
+  DCHECK_EQ(shift_type & 0x3, shift_type);
+  DCHECK_EQ(amount & 0x3f, amount);
   return ((shift_type & 0x3) << 7) | (amount & 0x3f);
 }
 
 int Arm64Mir2Lir::EncodeExtend(int extend_type, int amount) {
+  DCHECK_EQ(extend_type & 0x7, extend_type);
+  DCHECK_EQ(amount & 0x7, amount);
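+  // Bit 6 marks this as an extend encoding, distinguishing it from the shift encoding
+  // produced by EncodeShift() above (which never sets bit 6).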
   return  (1 << 6) | ((extend_type & 0x7) << 3) | (amount & 0x7);
 }
 
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 463f277..2a51b49 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -983,6 +983,8 @@
       estimated_native_code_size_(0),
       reg_pool_(NULL),
       live_sreg_(0),
+      core_vmap_table_(mir_graph->GetArena()->Adapter()),
+      fp_vmap_table_(mir_graph->GetArena()->Adapter()),
       num_core_spills_(0),
       num_fp_spills_(0),
       frame_size_(0),
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 0e46c96..7abf3e7 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -48,6 +48,11 @@
     true,   // kIntrinsicMinMaxFloat
     true,   // kIntrinsicMinMaxDouble
     true,   // kIntrinsicSqrt
+    true,   // kIntrinsicCeil
+    true,   // kIntrinsicFloor
+    true,   // kIntrinsicRint
+    true,   // kIntrinsicRoundFloat
+    true,   // kIntrinsicRoundDouble
     false,  // kIntrinsicGet
     false,  // kIntrinsicCharAt
     false,  // kIntrinsicCompareTo
@@ -75,6 +80,11 @@
 COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], MinMaxFloat_must_be_static);
 COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], MinMaxDouble_must_be_static);
 COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSqrt], Sqrt_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicCeil], Ceil_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicFloor], Floor_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRint], Rint_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundFloat], RoundFloat_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundDouble], RoundDouble_must_be_static);
 COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicGet], Get_must_not_be_static);
 COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCharAt], CharAt_must_not_be_static);
 COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCompareTo], CompareTo_must_not_be_static);
@@ -155,6 +165,10 @@
     "max",                   // kNameCacheMax
     "min",                   // kNameCacheMin
     "sqrt",                  // kNameCacheSqrt
+    "ceil",                  // kNameCacheCeil
+    "floor",                 // kNameCacheFloor
+    "rint",                  // kNameCacheRint
+    "round",                 // kNameCacheRound
     "get",                   // kNameCacheGet
     "charAt",                // kNameCacheCharAt
     "compareTo",             // kNameCacheCompareTo
@@ -314,6 +328,17 @@
     INTRINSIC(JavaLangMath,       Sqrt, D_D, kIntrinsicSqrt, 0),
     INTRINSIC(JavaLangStrictMath, Sqrt, D_D, kIntrinsicSqrt, 0),
 
+    INTRINSIC(JavaLangMath,       Ceil, D_D, kIntrinsicCeil, 0),
+    INTRINSIC(JavaLangStrictMath, Ceil, D_D, kIntrinsicCeil, 0),
+    INTRINSIC(JavaLangMath,       Floor, D_D, kIntrinsicFloor, 0),
+    INTRINSIC(JavaLangStrictMath, Floor, D_D, kIntrinsicFloor, 0),
+    INTRINSIC(JavaLangMath,       Rint, D_D, kIntrinsicRint, 0),
+    INTRINSIC(JavaLangStrictMath, Rint, D_D, kIntrinsicRint, 0),
+    INTRINSIC(JavaLangMath,       Round, F_I, kIntrinsicRoundFloat, 0),
+    INTRINSIC(JavaLangStrictMath, Round, F_I, kIntrinsicRoundFloat, 0),
+    INTRINSIC(JavaLangMath,       Round, D_J, kIntrinsicRoundDouble, 0),
+    INTRINSIC(JavaLangStrictMath, Round, D_J, kIntrinsicRoundDouble, 0),
+
     INTRINSIC(JavaLangRefReference, Get, _Object, kIntrinsicGet, 0),
 
     INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
@@ -436,6 +461,16 @@
       return backend->GenInlinedMinMaxFP(info, intrinsic.d.data & kIntrinsicFlagMin, true /* is_double */);
     case kIntrinsicSqrt:
       return backend->GenInlinedSqrt(info);
+    case kIntrinsicCeil:
+      return backend->GenInlinedCeil(info);
+    case kIntrinsicFloor:
+      return backend->GenInlinedFloor(info);
+    case kIntrinsicRint:
+      return backend->GenInlinedRint(info);
+    case kIntrinsicRoundFloat:
+      return backend->GenInlinedRound(info, false /* is_double */);
+    case kIntrinsicRoundDouble:
+      return backend->GenInlinedRound(info, true /* is_double */);
     case kIntrinsicGet:
       return backend->GenInlinedGet(info);
     case kIntrinsicCharAt:
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index cb8c165..1bd3c48 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -141,6 +141,10 @@
       kNameCacheMax,
       kNameCacheMin,
       kNameCacheSqrt,
+      kNameCacheCeil,
+      kNameCacheFloor,
+      kNameCacheRint,
+      kNameCacheRound,
       kNameCacheGet,
       kNameCacheCharAt,
       kNameCacheCompareTo,
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index aae9155..0054f34 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -256,7 +256,7 @@
     RegLocation rl_temp = UpdateLoc(rl_src2);
     int32_t constant_value = mir_graph_->ConstantValue(rl_src2);
     if ((rl_temp.location == kLocDalvikFrame) &&
-        InexpensiveConstantInt(constant_value)) {
+        InexpensiveConstantInt(constant_value, opcode)) {
       // OK - convert this to a compare immediate and branch
       OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
       return;
@@ -1808,10 +1808,6 @@
 
   switch (opcode) {
     case Instruction::NOT_LONG:
-      if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
-        GenNotLong(rl_dest, rl_src2);
-        return;
-      }
       rl_src2 = LoadValueWide(rl_src2, kCoreReg);
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
       // Check for destructive overlap
@@ -1829,39 +1825,22 @@
       return;
     case Instruction::ADD_LONG:
     case Instruction::ADD_LONG_2ADDR:
-      if (cu_->instruction_set != kThumb2) {
-        GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
-        return;
-      }
       first_op = kOpAdd;
       second_op = kOpAdc;
       break;
     case Instruction::SUB_LONG:
     case Instruction::SUB_LONG_2ADDR:
-      if (cu_->instruction_set != kThumb2) {
-        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
-        return;
-      }
       first_op = kOpSub;
       second_op = kOpSbc;
       break;
     case Instruction::MUL_LONG:
     case Instruction::MUL_LONG_2ADDR:
-      if (cu_->instruction_set != kMips) {
-        GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
-        return;
-      } else {
-        call_out = true;
-        TargetReg(kRet0, kNotWide).GetReg();
-        target = kQuickLmul;
-      }
+      call_out = true;
+      ret_reg = TargetReg(kRet0, kNotWide).GetReg();
+      target = kQuickLmul;
       break;
     case Instruction::DIV_LONG:
     case Instruction::DIV_LONG_2ADDR:
-      if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
-        GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
-        return;
-      }
       call_out = true;
       check_zero = true;
       ret_reg = TargetReg(kRet0, kNotWide).GetReg();
@@ -1869,10 +1848,6 @@
       break;
     case Instruction::REM_LONG:
     case Instruction::REM_LONG_2ADDR:
-      if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
-        GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
-        return;
-      }
       call_out = true;
       check_zero = true;
       target = kQuickLmod;
@@ -1882,37 +1857,19 @@
       break;
     case Instruction::AND_LONG_2ADDR:
     case Instruction::AND_LONG:
-      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64 ||
-          cu_->instruction_set == kArm64) {
-        return GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
-      }
       first_op = kOpAnd;
       second_op = kOpAnd;
       break;
     case Instruction::OR_LONG:
     case Instruction::OR_LONG_2ADDR:
-      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64 ||
-          cu_->instruction_set == kArm64) {
-        GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
-        return;
-      }
       first_op = kOpOr;
       second_op = kOpOr;
       break;
     case Instruction::XOR_LONG:
     case Instruction::XOR_LONG_2ADDR:
-      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64 ||
-          cu_->instruction_set == kArm64) {
-        GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
-        return;
-      }
       first_op = kOpXor;
       second_op = kOpXor;
       break;
-    case Instruction::NEG_LONG: {
-      GenNegLong(rl_dest, rl_src2);
-      return;
-    }
     default:
       LOG(FATAL) << "Invalid long arith op";
   }
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index a0a2ed0..e70b0c5 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -1166,8 +1166,14 @@
   }
   if (use_direct_type_ptr) {
     LoadConstant(reg_class, direct_type_ptr);
-  } else {
+  } else if (cu_->dex_file == old_dex) {
+    // TODO: Bug 16656190 If cu_->dex_file != old_dex the patching could retrieve the wrong class
+    // since the load class is indexed only by the type_idx. We should include which dex file a
+    // class is from in the LoadClassType LIR.
     LoadClassType(type_idx, kArg1);
+  } else {
+    cu_->dex_file = old_dex;
+    return false;
   }
   cu_->dex_file = old_dex;
 
@@ -1427,6 +1433,22 @@
   return false;
 }
 
+bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
+  return false;
+}
+
+bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
+  return false;
+}
+
+bool Mir2Lir::GenInlinedRint(CallInfo* info) {
+  return false;
+}
+
+bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+  return false;
+}
+
 bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
   if (cu_->instruction_set == kMips) {
     // TODO - add Mips implementation
@@ -1568,16 +1590,6 @@
                   kNotVolatile);
       break;
 
-    case kX86:
-      reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg,
-                                                          Thread::PeerOffset<4>());
-      break;
-
-    case kX86_64:
-      reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg,
-                                                          Thread::PeerOffset<8>());
-      break;
-
     default:
       LOG(FATAL) << "Unexpected isa " << cu_->instruction_set;
   }
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 2893157..eec2b32 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -15,15 +15,43 @@
  */
 
 #include "dex/compiler_internals.h"
+#include "dex/quick/mir_to_lir-inl.h"
 
 namespace art {
 
 #define DEBUG_OPT(X)
 
+#define LOAD_STORE_CHECK_REG_DEP(mask, check) (mask.Intersects(*check->u.m.def_mask))
+
 /* Check RAW, WAR, and WAW dependencies on the register operands */
 #define CHECK_REG_DEP(use, def, check) (def.Intersects(*check->u.m.use_mask)) || \
                                        (use.Union(def).Intersects(*check->u.m.def_mask))
 
+/* Load Store Elimination filter:
+ *  - Wide Load/Store
+ *  - Exclusive Load/Store
+ *  - Quad operand Load/Store
+ *  - List Load/Store
+ *  - IT blocks
+ *  - Branch
+ *  - Dmb
+ */
+#define LOAD_STORE_FILTER(flags) ((flags & (IS_QUAD_OP|IS_STORE)) == (IS_QUAD_OP|IS_STORE) || \
+                                 (flags & (IS_QUAD_OP|IS_LOAD)) == (IS_QUAD_OP|IS_LOAD) || \
+                                 (flags & REG_USE012) == REG_USE012 || \
+                                 (flags & REG_DEF01) == REG_DEF01 || \
+                                 (flags & REG_DEF_LIST0) || \
+                                 (flags & REG_DEF_LIST1) || \
+                                 (flags & REG_USE_LIST0) || \
+                                 (flags & REG_USE_LIST1) || \
+                                 (flags & REG_DEF_FPCS_LIST0) || \
+                                 (flags & REG_DEF_FPCS_LIST2) || \
+                                 (flags & REG_USE_FPCS_LIST0) || \
+                                 (flags & REG_USE_FPCS_LIST2) || \
+                                 (flags & IS_VOLATILE) || \
+                                 (flags & IS_BRANCH) || \
+                                 (flags & IS_IT))
+
 /* Scheduler heuristics */
 #define MAX_HOIST_DISTANCE 20
 #define LDLD_DISTANCE 4
@@ -43,6 +71,7 @@
   /* Insert a move to replace the load */
   LIR* move_lir;
   move_lir = OpRegCopyNoInsert(dest, src);
+  move_lir->dalvik_offset = orig_lir->dalvik_offset;
   /*
    * Insert the converted instruction after the original since the
   * optimization is scanning in the top-down order and the new instruction
@@ -52,8 +81,53 @@
   InsertLIRAfter(orig_lir, move_lir);
 }
 
+void Mir2Lir::DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type) {
+  LOG(INFO) << type;
+  LOG(INFO) << "Check LIR:";
+  DumpLIRInsn(check_lir, 0);
+  LOG(INFO) << "This LIR:";
+  DumpLIRInsn(this_lir, 0);
+}
+
+inline void Mir2Lir::EliminateLoad(LIR* lir, int reg_id) {
+  DCHECK(RegStorage::SameRegType(lir->operands[0], reg_id));
+  RegStorage dest_reg, src_reg;
+
+  /* Same Register - Nop */
+  if (lir->operands[0] == reg_id) {
+    NopLIR(lir);
+    return;
+  }
+
+  /* Different register - Move + Nop */
+  switch (reg_id & RegStorage::kShapeTypeMask) {
+    case RegStorage::k32BitSolo | RegStorage::kCoreRegister:
+      dest_reg = RegStorage::Solo32(lir->operands[0]);
+      src_reg = RegStorage::Solo32(reg_id);
+      break;
+    case RegStorage::k64BitSolo | RegStorage::kCoreRegister:
+      dest_reg = RegStorage::Solo64(lir->operands[0]);
+      src_reg = RegStorage::Solo64(reg_id);
+      break;
+    case RegStorage::k32BitSolo | RegStorage::kFloatingPoint:
+      dest_reg = RegStorage::FloatSolo32(lir->operands[0]);
+      src_reg = RegStorage::FloatSolo32(reg_id);
+      break;
+    case RegStorage::k64BitSolo | RegStorage::kFloatingPoint:
+      dest_reg = RegStorage::FloatSolo64(lir->operands[0]);
+      src_reg = RegStorage::FloatSolo64(reg_id);
+      break;
+    default:
+      LOG(INFO) << "Load Store: Unsuported register type!";
+      return;
+  }
+  ConvertMemOpIntoMove(lir, dest_reg, src_reg);
+  NopLIR(lir);
+  return;
+}
+
 /*
- * Perform a pass of top-down walk, from the second-last instruction in the
+ * Perform a top-down walk, from the first to the last instruction in the
  * superblock, to eliminate redundant loads and stores.
  *
  * An earlier load can eliminate a later load iff
@@ -66,213 +140,172 @@
  *   2) The native register is not clobbered in between
  *   3) The memory location is not written to in between
  *
- * A later store can be eliminated by an earlier store iff
+ * An earlier store can eliminate a later store iff
  *   1) They are must-aliases
  *   2) The memory location is not written to in between
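+ *
+ * For example (illustrative A64): "str w0, [sp, #16]" followed by "ldr w1, [sp, #16]"
+ * can become "mov w1, w0", provided w0, sp, and the stack slot are not redefined in between.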
  */
 void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
-  LIR* this_lir;
+  LIR* this_lir, *check_lir;
+  std::vector<int> alias_list;
 
   if (head_lir == tail_lir) {
     return;
   }
 
-  for (this_lir = PREV_LIR(tail_lir); this_lir != head_lir; this_lir = PREV_LIR(this_lir)) {
-    if (IsPseudoLirOp(this_lir->opcode)) {
+  for (this_lir = head_lir; this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
+    if (this_lir->flags.is_nop || IsPseudoLirOp(this_lir->opcode)) {
       continue;
     }
 
-    int sink_distance = 0;
-
     uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
+    /* Target LIR - skip if the instruction is:
+     *  - NOP
+     *  - Branch
+     *  - Both a load and a store in one instruction
+     *  - Neither a load nor a store
+     *  - Wide load
+     *  - Wide store
+     *  - Exclusive load/store
+     */
+    if (LOAD_STORE_FILTER(target_flags) ||
+        ((target_flags & (IS_LOAD | IS_STORE)) == (IS_LOAD | IS_STORE)) ||
+        !(target_flags & (IS_LOAD | IS_STORE))) {
+      continue;
+    }
+    int native_reg_id = this_lir->operands[0];
+    int dest_reg_id = this_lir->operands[1];
+    bool is_this_lir_load = target_flags & IS_LOAD;
+    ResourceMask this_mem_mask = kEncodeMem.Intersection(this_lir->u.m.use_mask->Union(
+                                                        *this_lir->u.m.def_mask));
 
-    /* Skip non-interesting instructions */
-    if ((this_lir->flags.is_nop == true) ||
-        (target_flags & IS_BRANCH) ||
-        ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||  // Skip wide loads.
-        ((target_flags & (REG_USE0 | REG_USE1 | REG_USE2)) ==
-         (REG_USE0 | REG_USE1 | REG_USE2)) ||  // Skip wide stores.
-        // Skip instructions that are neither loads or stores.
-        !(target_flags & (IS_LOAD | IS_STORE)) ||
-        // Skip instructions that do both load and store.
-        ((target_flags & (IS_STORE | IS_LOAD)) == (IS_STORE | IS_LOAD))) {
+    /* Only consider literal, Dalvik register, and heap accesses */
+    if (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeDalvikReg)) &&
+        (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeHeapRef)))) {
       continue;
     }
 
-    int native_reg_id;
-    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
-      // If x86, location differs depending on whether memory/reg operation.
-      native_reg_id = (target_flags & IS_STORE) ? this_lir->operands[2] : this_lir->operands[0];
-    } else {
-      native_reg_id = this_lir->operands[0];
-    }
-    bool is_this_lir_load = target_flags & IS_LOAD;
-    LIR* check_lir;
-    /* Use the mem mask to determine the rough memory location */
-    ResourceMask this_mem_mask = kEncodeMem.Intersection(
-        this_lir->u.m.use_mask->Union(*this_lir->u.m.def_mask));
-
-    /*
-     * Currently only eliminate redundant ld/st for constant and Dalvik
-     * register accesses.
-     */
-    if (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeDalvikReg))) {
+    /* Skip if the instruction redefines a register it also uses (address redefinition) */
+    if (this_lir->u.m.def_mask->Intersects(*this_lir->u.m.use_mask)) {
       continue;
     }
 
     ResourceMask stop_def_reg_mask = this_lir->u.m.def_mask->Without(kEncodeMem);
+    ResourceMask stop_use_reg_mask = this_lir->u.m.use_mask->Without(kEncodeMem);
 
-    /*
-     * Add pc to the resource mask to prevent this instruction
-     * from sinking past branch instructions. Also take out the memory
-     * region bits since stop_mask is used to check data/control
-     * dependencies.
-     *
-     * Note: on x86(-64) and Arm64 we use the IsBranch bit, as the PC is not exposed.
-     */
-    ResourceMask pc_encoding = GetPCUseDefEncoding();
-    if (pc_encoding == kEncodeNone) {
-      // TODO: Stop the abuse of kIsBranch as a bit specification for ResourceMask.
-      pc_encoding = ResourceMask::Bit(kIsBranch);
+    /* The ARM backend can load/store PC */
+    ResourceMask uses_pc = GetPCUseDefEncoding();
+    if (uses_pc.Intersects(this_lir->u.m.use_mask->Union(*this_lir->u.m.def_mask))) {
+      continue;
     }
-    ResourceMask  stop_use_reg_mask = pc_encoding.Union(*this_lir->u.m.use_mask).
-        Without(kEncodeMem);
 
+    /* Initialize alias list */
+    alias_list.clear();
+    ResourceMask alias_reg_list_mask = kEncodeNone;
+    if (!this_mem_mask.Intersects(kEncodeLiteral)) {
+      alias_list.push_back(dest_reg_id);
+      SetupRegMask(&alias_reg_list_mask, dest_reg_id);
+    }
+
+    /* Scan through the BB for possible elimination candidates */
     for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
-      /*
-       * Skip already dead instructions (whose dataflow information is
-       * outdated and misleading).
-       */
       if (check_lir->flags.is_nop || IsPseudoLirOp(check_lir->opcode)) {
         continue;
       }
 
-      ResourceMask check_mem_mask = kEncodeMem.Intersection(
-          check_lir->u.m.use_mask->Union(*check_lir->u.m.def_mask));
-      ResourceMask alias_condition = this_mem_mask.Intersection(check_mem_mask);
-      bool stop_here = false;
+      if (uses_pc.Intersects(check_lir->u.m.use_mask->Union(*check_lir->u.m.def_mask))) {
+        break;
+      }
 
-      /*
-       * Potential aliases seen - check the alias relations
-       */
+      ResourceMask check_mem_mask = kEncodeMem.Intersection(check_lir->u.m.use_mask->Union(
+                                                          *check_lir->u.m.def_mask));
+      ResourceMask alias_mem_mask = this_mem_mask.Intersection(check_mem_mask);
       uint64_t check_flags = GetTargetInstFlags(check_lir->opcode);
-      // TUNING: Support instructions with multiple register targets.
-      if ((check_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) {
+      bool stop_here = false;
+      bool pass_over = false;
+
+      /* Check LIR - skip if instr is:
+       *  - Wide Load
+       *  - Wide Store
+       *  - Branch
+       *  - Dmb
+       *  - Exclusive load/store
+       *  - IT blocks
+       *  - Quad loads
+       */
+      if (LOAD_STORE_FILTER(check_flags)) {
         stop_here = true;
-      } else if (!check_mem_mask.Equals(kEncodeMem) && !alias_condition.Equals(kEncodeNone)) {
-        bool is_check_lir_load = check_flags & IS_LOAD;
-        if  (alias_condition.Equals(kEncodeLiteral)) {
-          /*
-           * Should only see literal loads in the instruction
-           * stream.
-           */
-          DCHECK(!(check_flags & IS_STORE));
-          /* Same value && same register type */
-          if (check_lir->flags.alias_info == this_lir->flags.alias_info &&
-              RegStorage::SameRegType(check_lir->operands[0], native_reg_id)) {
-            /*
-             * Different destination register - insert
-             * a move
-             */
-            if (check_lir->operands[0] != native_reg_id) {
-              // TODO: update for 64-bit regs.
-              ConvertMemOpIntoMove(check_lir, RegStorage::Solo32(check_lir->operands[0]),
-                                   RegStorage::Solo32(native_reg_id));
-            }
-            NopLIR(check_lir);
-          }
-        } else if (alias_condition.Equals(kEncodeDalvikReg)) {
-          /* Must alias */
-          if (check_lir->flags.alias_info == this_lir->flags.alias_info) {
-            /* Only optimize compatible registers */
-            bool reg_compatible = RegStorage::SameRegType(check_lir->operands[0], native_reg_id);
-            if ((is_this_lir_load && is_check_lir_load) ||
-                (!is_this_lir_load && is_check_lir_load)) {
-              /* RAR or RAW */
-              if (reg_compatible) {
-                /*
-                 * Different destination register -
-                 * insert a move
-                 */
-                if (check_lir->operands[0] != native_reg_id) {
-                  // TODO: update for 64-bit regs.
-                  ConvertMemOpIntoMove(check_lir, RegStorage::Solo32(check_lir->operands[0]),
-                                       RegStorage::Solo32(native_reg_id));
-                }
-                NopLIR(check_lir);
-              } else {
-                /*
-                 * Destinaions are of different types -
-                 * something complicated going on so
-                 * stop looking now.
-                 */
-                stop_here = true;
-              }
-            } else if (is_this_lir_load && !is_check_lir_load) {
-              /* WAR - register value is killed */
-              stop_here = true;
-            } else if (!is_this_lir_load && !is_check_lir_load) {
-              /* WAW - nuke the earlier store */
-              NopLIR(this_lir);
-              stop_here = true;
-            }
-          /* Partial overlap */
-          } else if (IsDalvikRegisterClobbered(this_lir, check_lir)) {
-            /*
-             * It is actually ok to continue if check_lir
-             * is a read. But it is hard to make a test
-             * case for this so we just stop here to be
-             * conservative.
-             */
-            stop_here = true;
+        /* Possible alias or result of an earlier pass */
+      } else if (check_flags & IS_MOVE) {
+        /* Iterate by index: push_back() may invalidate range-for iterators. */
+        size_t alias_list_size = alias_list.size();
+        for (size_t i = 0; i < alias_list_size; i++) {
+          if (RegStorage::RegNum(check_lir->operands[1]) == RegStorage::RegNum(alias_list[i])) {
+            pass_over = true;
+            alias_list.push_back(check_lir->operands[0]);
+            SetupRegMask(&alias_reg_list_mask, check_lir->operands[0]);
+          }
+        }
-        /* Memory content may be updated. Stop looking now. */
+      /* Memory regions */
+      } else if (!alias_mem_mask.Equals(kEncodeNone)) {
+        DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
+        bool is_check_lir_load = check_flags & IS_LOAD;
+        bool reg_compatible = RegStorage::SameRegType(check_lir->operands[0], native_reg_id);
+
+        if (alias_mem_mask.Equals(kEncodeLiteral)) {
+          DCHECK(check_flags & IS_LOAD);
+          /* Same value && same register type */
+          if (reg_compatible && (this_lir->target == check_lir->target)) {
+            DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "LITERAL"));
+            EliminateLoad(check_lir, native_reg_id);
+          }
+        } else if ((alias_mem_mask.Equals(kEncodeDalvikReg) ||
+                    alias_mem_mask.Equals(kEncodeHeapRef)) &&
+                   alias_reg_list_mask.Intersects(check_lir->u.m.use_mask->Without(kEncodeMem))) {
+          bool same_offset = (GetInstructionOffset(this_lir) == GetInstructionOffset(check_lir));
+          if (same_offset && !is_check_lir_load) {
+            if (check_lir->operands[0] != native_reg_id) {
+              DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "STORE STOP"));
+              stop_here = true;
+              break;
+            }
+          }
+
+          if (reg_compatible && same_offset &&
+              ((is_this_lir_load && is_check_lir_load)  /* LDR - LDR */ ||
+              (!is_this_lir_load && is_check_lir_load)  /* STR - LDR */ ||
+              (!is_this_lir_load && !is_check_lir_load) /* STR - STR */)) {
+            DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "LOAD STORE"));
+            EliminateLoad(check_lir, native_reg_id);
+          }
+        } else {
+          /* Unsupported memory region */
+        }
+      }
+
+      if (pass_over) {
+        continue;
+      }
+
+      if (stop_here == false) {
+        bool stop_alias = LOAD_STORE_CHECK_REG_DEP(alias_reg_list_mask, check_lir);
+        if (stop_alias) {
+          /* Scan through the alias list and remove any register clobbered by check_lir. */
+          /* Use explicit iterators: erase() invalidates range-for iteration. */
+          for (auto it = alias_list.begin(); it != alias_list.end();) {
+            ResourceMask alias_reg_mask = kEncodeNone;
+            SetupRegMask(&alias_reg_mask, *it);
+            if (LOAD_STORE_CHECK_REG_DEP(alias_reg_mask, check_lir)) {
+              ClearRegMask(&alias_reg_list_mask, *it);
+              it = alias_list.erase(it);
+            } else {
+              ++it;
+            }
+          }
+        }
+        ResourceMask stop_search_mask = stop_def_reg_mask.Union(stop_use_reg_mask);
+        stop_search_mask = stop_search_mask.Union(alias_reg_list_mask);
+        stop_here = LOAD_STORE_CHECK_REG_DEP(stop_search_mask, check_lir);
         if (stop_here) {
           break;
-        /* The check_lir has been transformed - check the next one */
-        } else if (check_lir->flags.is_nop) {
-          continue;
         }
-      }
-
-
-      /*
-       * this and check LIRs have no memory dependency. Now check if
-       * their register operands have any RAW, WAR, and WAW
-       * dependencies. If so, stop looking.
-       */
-      if (stop_here == false) {
-        stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask, check_lir);
-      }
-
-      if (stop_here == true) {
-        if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
-          // Prevent stores from being sunk between ops that generate ccodes and
-          // ops that use them.
-          uint64_t flags = GetTargetInstFlags(check_lir->opcode);
-          if (sink_distance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
-            check_lir = PREV_LIR(check_lir);
-            sink_distance--;
-          }
-        }
-        DEBUG_OPT(dump_dependent_insn_pair(this_lir, check_lir, "REG CLOBBERED"));
-        /* Only sink store instructions */
-        if (sink_distance && !is_this_lir_load) {
-          LIR* new_store_lir =
-              static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
-          *new_store_lir = *this_lir;
-          /*
-           * Stop point found - insert *before* the check_lir
-           * since the instruction list is scanned in the
-           * top-down order.
-           */
-          InsertLIRBefore(check_lir, new_store_lir);
-          NopLIR(this_lir);
-        }
+      } else {
         break;
-      } else if (!check_lir->flags.is_nop) {
-        sink_distance++;
       }
     }
   }
@@ -385,7 +418,7 @@
 
       /* Found a new place to put the load - move it here */
       if (stop_here == true) {
-        DEBUG_OPT(dump_dependent_insn_pair(check_lir, this_lir "HOIST STOP"));
+        DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "HOIST STOP"));
         break;
       }
     }
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 4bd2748..bd0c020 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -92,12 +92,6 @@
                      RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
     void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_shift);
-    void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
     void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2);
     void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -112,16 +106,8 @@
     bool GenInlinedSqrt(CallInfo* info);
     bool GenInlinedPeek(CallInfo* info, OpSize size);
     bool GenInlinedPoke(CallInfo* info, OpSize size);
-    void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                   RegLocation rl_src2);
-    void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                       RegLocation rl_src2, bool is_div);
+    void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                        RegLocation rl_src2) OVERRIDE;
     RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
     RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
     void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
@@ -196,6 +182,12 @@
     LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
 
   private:
+    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+    void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                    RegLocation rl_src2);
+    void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                    RegLocation rl_src2);
+
     void ConvertShortToLongBranch(LIR* lir);
     RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2, bool is_div, bool check_zero);
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index d727615..ea56989 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -392,11 +392,6 @@
 }
 
 
-void MipsMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
-                             RegLocation rl_src1, RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenMulLong for Mips";
-}
-
 void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2) {
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
@@ -441,13 +436,27 @@
   StoreValueWide(rl_dest, rl_result);
 }
 
-void MipsMir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
-  LOG(FATAL) << "Unexpected use GenNotLong()";
-}
+void MipsMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                                 RegLocation rl_src2) {
+  switch (opcode) {
+    case Instruction::ADD_LONG:
+    case Instruction::ADD_LONG_2ADDR:
+      GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::SUB_LONG:
+    case Instruction::SUB_LONG_2ADDR:
+      GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::NEG_LONG:
+      GenNegLong(rl_dest, rl_src2);
+      return;
 
-void MipsMir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2, bool is_div) {
-  LOG(FATAL) << "Unexpected use GenDivRemLong()";
+    default:
+      break;
+  }
+
+  // Fallback for all other ops.
+  Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
 }
 
 void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
@@ -470,22 +479,6 @@
   StoreValueWide(rl_dest, rl_result);
 }
 
-void MipsMir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest,
-                             RegLocation rl_src1,
-                             RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenAndLong for Mips";
-}
-
-void MipsMir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest,
-                            RegLocation rl_src1, RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenOrLong for Mips";
-}
-
-void MipsMir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest,
-                             RegLocation rl_src1, RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenXorLong for Mips";
-}
-
 /*
  * Generate array load
  */
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index 9ce5bb7..ff5a46f 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -147,6 +147,15 @@
 }
 
 /*
+ * Clear the corresponding bit(s).
+ */
+inline void Mir2Lir::ClearRegMask(ResourceMask* mask, int reg) {
+  DCHECK_EQ((reg & ~RegStorage::kRegValMask), 0);
+  DCHECK(reginfo_map_.Get(reg) != nullptr) << "No info for reg " << reg;
+  *mask = mask->ClearBits(reginfo_map_.Get(reg)->DefUseMask());
+}
+
+/*
  * Set up the proper fields in the resource mask
  */
 inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index ed7fcdd..e519011 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -926,11 +926,11 @@
     case Instruction::XOR_INT:
     case Instruction::XOR_INT_2ADDR:
       if (rl_src[0].is_const &&
-          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[0]))) {
+          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[0]), opcode)) {
         GenArithOpIntLit(opcode, rl_dest, rl_src[1],
                              mir_graph_->ConstantValue(rl_src[0].orig_sreg));
       } else if (rl_src[1].is_const &&
-          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]))) {
+                 InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
         GenArithOpIntLit(opcode, rl_dest, rl_src[0],
                              mir_graph_->ConstantValue(rl_src[1].orig_sreg));
       } else {
@@ -951,7 +951,7 @@
     case Instruction::USHR_INT:
     case Instruction::USHR_INT_2ADDR:
       if (rl_src[1].is_const &&
-          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]))) {
+          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
         GenArithOpIntLit(opcode, rl_dest, rl_src[0], mir_graph_->ConstantValue(rl_src[1]));
       } else {
         GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
@@ -1311,4 +1311,9 @@
       rl.ref ? RefCheck::kCheckRef : RefCheck::kCheckNotRef, FPCheck::kIgnoreFP, fail, report);
 }
 
+size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
+  UNIMPLEMENTED(FATAL) << "Unsuppored GetInstructionOffset()";
+  return 0;
+}
+
 }  // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index b832223..4ed9929 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -32,6 +32,7 @@
 #include "safe_map.h"
 #include "utils/array_ref.h"
 #include "utils/arena_allocator.h"
+#include "utils/arena_containers.h"
 #include "utils/growable_array.h"
 #include "utils/stack_checks.h"
 
@@ -51,6 +52,7 @@
 #define IS_BINARY_OP         (1ULL << kIsBinaryOp)
 #define IS_BRANCH            (1ULL << kIsBranch)
 #define IS_IT                (1ULL << kIsIT)
+#define IS_MOVE              (1ULL << kIsMoveOp)
 #define IS_LOAD              (1ULL << kMemLoad)
 #define IS_QUAD_OP           (1ULL << kIsQuadOp)
 #define IS_QUIN_OP           (1ULL << kIsQuinOp)
@@ -58,6 +60,7 @@
 #define IS_STORE             (1ULL << kMemStore)
 #define IS_TERTIARY_OP       (1ULL << kIsTertiaryOp)
 #define IS_UNARY_OP          (1ULL << kIsUnaryOp)
+#define IS_VOLATILE          (1ULL << kMemVolatile)
 #define NEEDS_FIXUP          (1ULL << kPCRelFixup)
 #define NO_OPERAND           (1ULL << kNoOperand)
 #define REG_DEF0             (1ULL << kRegDef0)
@@ -94,6 +97,20 @@
 #define REG_USE_HI           (1ULL << kUseHi)
 #define REG_DEF_LO           (1ULL << kDefLo)
 #define REG_DEF_HI           (1ULL << kDefHi)
+#define SCALED_OFFSET_X0     (1ULL << kMemScaledx0)
+#define SCALED_OFFSET_X2     (1ULL << kMemScaledx2)
+#define SCALED_OFFSET_X4     (1ULL << kMemScaledx4)
+
+// Special load/stores
+#define IS_LOADX             (IS_LOAD | IS_VOLATILE)
+#define IS_LOAD_OFF          (IS_LOAD | SCALED_OFFSET_X0)
+#define IS_LOAD_OFF2         (IS_LOAD | SCALED_OFFSET_X2)
+#define IS_LOAD_OFF4         (IS_LOAD | SCALED_OFFSET_X4)
+
+#define IS_STOREX            (IS_STORE | IS_VOLATILE)
+#define IS_STORE_OFF         (IS_STORE | SCALED_OFFSET_X0)
+#define IS_STORE_OFF2        (IS_STORE | SCALED_OFFSET_X2)
+#define IS_STORE_OFF4        (IS_STORE | SCALED_OFFSET_X4)
 
 // Common combo register usage patterns.
 #define REG_DEF01            (REG_DEF0 | REG_DEF1)
@@ -552,6 +569,12 @@
 
     virtual ~Mir2Lir() {}
 
+    /**
+     * @brief Decodes the memory offset of a load/store LIR.
+     * @return The offset in bytes, with any encoding scale applied.
+     */
+    virtual size_t GetInstructionOffset(LIR* lir);
+
     int32_t s4FromSwitchData(const void* switch_data) {
       return *reinterpret_cast<const int32_t*>(switch_data);
     }
@@ -641,7 +664,10 @@
     void SetMemRefType(LIR* lir, bool is_load, int mem_type);
     void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
     void SetupRegMask(ResourceMask* mask, int reg);
+    void ClearRegMask(ResourceMask* mask, int reg);
     void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
+    void EliminateLoad(LIR* lir, int reg_id);
+    void DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type);
     void DumpPromotionMap();
     void CodegenDump();
     LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
@@ -842,8 +868,8 @@
                         RegLocation rl_src1, RegLocation rl_shift);
     void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
                           RegLocation rl_src, int lit);
-    void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                        RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                RegLocation rl_src1, RegLocation rl_src2);
     void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src);
     virtual void GenSuspendTest(int opt_flags);
     virtual void GenSuspendTestAndBranch(int opt_flags, LIR* target);
@@ -940,10 +966,14 @@
     virtual bool GenInlinedAbsDouble(CallInfo* info) = 0;
     bool GenInlinedFloatCvt(CallInfo* info);
     bool GenInlinedDoubleCvt(CallInfo* info);
+    virtual bool GenInlinedCeil(CallInfo* info);
+    virtual bool GenInlinedFloor(CallInfo* info);
+    virtual bool GenInlinedRint(CallInfo* info);
+    virtual bool GenInlinedRound(CallInfo* info, bool is_double);
     virtual bool GenInlinedArrayCopyCharArray(CallInfo* info);
     virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
     bool GenInlinedStringCompareTo(CallInfo* info);
-    bool GenInlinedCurrentThread(CallInfo* info);
+    virtual bool GenInlinedCurrentThread(CallInfo* info);
     bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
     bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
                              bool is_volatile, bool is_ordered);
@@ -1221,15 +1251,6 @@
     // Required for target - Dalvik-level generators.
     virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                    RegLocation rl_src1, RegLocation rl_src2) = 0;
-    virtual void GenMulLong(Instruction::Code,
-                            RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
-    virtual void GenAddLong(Instruction::Code,
-                            RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
-    virtual void GenAndLong(Instruction::Code,
-                            RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
     virtual void GenArithOpDouble(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1,
                                   RegLocation rl_src2) = 0;
@@ -1257,16 +1278,6 @@
     virtual bool GenInlinedSqrt(CallInfo* info) = 0;
     virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
     virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
-    virtual void GenNotLong(RegLocation rl_dest, RegLocation rl_src) = 0;
-    virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0;
-    virtual void GenOrLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2) = 0;
-    virtual void GenSubLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
-    virtual void GenXorLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
-    virtual void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2, bool is_div) = 0;
     virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
                                   bool is_div) = 0;
     virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
@@ -1416,6 +1427,9 @@
     virtual bool InexpensiveConstantFloat(int32_t value) = 0;
     virtual bool InexpensiveConstantLong(int64_t value) = 0;
     virtual bool InexpensiveConstantDouble(int64_t value) = 0;
+    virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
+      return InexpensiveConstantInt(value);
+    }
 
     // May be optimized by targets.
     virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
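
The new two-argument overload defaults to the opcode-agnostic check; a back-end can refine it because the same constant may be cheap for one opcode and costly for another. A hypothetical override ("MyTargetMir2Lir" and the opcode choices are illustrative, not from this patch):

// Hypothetical target override: whether an int constant is cheap can depend
// on the operation consuming it (e.g. encodable as an ADD immediate but not
// as a CMP immediate). Falls back to the generic test.
bool MyTargetMir2Lir::InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
  switch (opcode) {
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      // The negated value can be used with a SUB immediate instead.
      return InexpensiveConstantInt(value) || InexpensiveConstantInt(-value);
    default:
      return InexpensiveConstantInt(value);
  }
}
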
@@ -1686,8 +1700,8 @@
     CodeBuffer code_buffer_;
     // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
     std::vector<uint8_t> encoded_mapping_table_;
-    std::vector<uint32_t> core_vmap_table_;
-    std::vector<uint32_t> fp_vmap_table_;
+    ArenaVector<uint32_t> core_vmap_table_;
+    ArenaVector<uint32_t> fp_vmap_table_;
     std::vector<uint8_t> native_gc_map_;
     int num_core_spills_;
     int num_fp_spills_;
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 45244e1..be966e1 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -1171,12 +1171,13 @@
       } else {
         counts[p_map_idx].count += use_count;
       }
-    } else if (!IsInexpensiveConstant(loc)) {
+    } else {
       if (loc.wide && WideGPRsAreAliases()) {
-        // Longs and doubles can be counted together.
         i++;
       }
-      counts[p_map_idx].count += use_count;
+      if (!IsInexpensiveConstant(loc)) {
+        counts[p_map_idx].count += use_count;
+      }
     }
   }
 }
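
The reordering matters on targets where a wide value occupies a single 64-bit register: the high SSA half must be skipped whether or not the value is an inexpensive constant. A standalone illustration of the fixed loop shape (simplified types, not ART code):

#include <cstdio>

struct Loc { bool wide; bool cheap_const; int idx; };

int main() {
  // Slots 0/1 form one wide, cheap-constant value; slot 2 is an ordinary int.
  Loc locs[] = {{true, true, 0}, {true, true, 0}, {false, false, 1}};
  int counts[2] = {0, 0};
  for (int i = 0; i < 3; i++) {
    const Loc loc = locs[i];
    if (loc.wide) {
      i++;  // Always consume the high half, even for cheap constants.
    }
    if (!loc.cheap_const) {
      counts[loc.idx]++;
    }
  }
  std::printf("%d %d\n", counts[0], counts[1]);  // "0 1": wide const adds nothing.
  return 0;
}
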
@@ -1185,9 +1186,10 @@
 static int SortCounts(const void *val1, const void *val2) {
   const Mir2Lir::RefCounts* op1 = reinterpret_cast<const Mir2Lir::RefCounts*>(val1);
   const Mir2Lir::RefCounts* op2 = reinterpret_cast<const Mir2Lir::RefCounts*>(val2);
-  // Note that we fall back to sorting on reg so we get stable output
-  // on differing qsort implementations (such as on host and target or
-  // between local host and build servers).
+  // Note that we fall back to sorting on reg so we get stable output on differing qsort
+  // implementations (such as on host and target or between local host and build servers).
+  // Note also that if a wide val1 and a non-wide val2 have the same count, then val1 always
+  // "loses" (as STARTING_WIDE_SREG is or-ed in val1->s_reg).
   return (op1->count == op2->count)
           ? (op1->s_reg - op2->s_reg)
           : (op1->count < op2->count ? 1 : -1);
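
A standalone demonstration of the tie-break (simplified struct, not ART code): with equal counts the comparator orders by s_reg, so the output is identical across qsort implementations.

#include <cstdio>
#include <cstdlib>

struct RefCounts { int count; int s_reg; };

static int SortCounts(const void* val1, const void* val2) {
  const RefCounts* op1 = static_cast<const RefCounts*>(val1);
  const RefCounts* op2 = static_cast<const RefCounts*>(val2);
  return (op1->count == op2->count)
          ? (op1->s_reg - op2->s_reg)
          : (op1->count < op2->count ? 1 : -1);
}

int main() {
  RefCounts counts[] = {{3, 2}, {5, 0}, {3, 1}};
  qsort(counts, 3, sizeof(RefCounts), SortCounts);
  for (const RefCounts& rc : counts) {
    std::printf("count=%d s_reg=%d\n", rc.count, rc.s_reg);  // 5/0, then 3/1, 3/2
  }
  return 0;
}
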
@@ -1230,8 +1232,8 @@
    * TUNING: replace with linear scan once we have the ability
    * to describe register live ranges for GC.
    */
-  size_t core_reg_count_size = cu_->target64 ? num_regs * 2 : num_regs;
-  size_t fp_reg_count_size = num_regs * 2;
+  size_t core_reg_count_size = WideGPRsAreAliases() ? num_regs : num_regs * 2;
+  size_t fp_reg_count_size = WideFPRsAreAliases() ? num_regs : num_regs * 2;
   RefCounts *core_regs =
       static_cast<RefCounts*>(arena_->Alloc(sizeof(RefCounts) * core_reg_count_size,
                                             kArenaAllocRegAlloc));
@@ -1261,7 +1263,6 @@
   // Sum use counts of SSA regs by original Dalvik vreg.
   CountRefs(core_regs, fp_regs, num_regs);
 
-
   // Sort the count arrays
   qsort(core_regs, core_reg_count_size, sizeof(RefCounts), SortCounts);
   qsort(fp_regs, fp_reg_count_size, sizeof(RefCounts), SortCounts);
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index b7a7512..2eed731 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -35,7 +35,7 @@
                      rm32_i32, rm32_i32_modrm, \
                      rm32_i8, rm32_i8_modrm) \
 { kX86 ## opname ## 8MR, kMemReg,    mem_use | IS_TERTIARY_OP |           REG_USE02  | SETS_CCODES | uses_ccodes, { 0,             0, rm8_r8, 0, 0, 0,            0,      0, true }, #opname "8MR", "[!0r+!1d],!2r" }, \
-{ kX86 ## opname ## 8AR, kArrayReg,  mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { 0,             0, rm8_r8, 0, 0, 0,            0,      0, true}, #opname "8AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
+{ kX86 ## opname ## 8AR, kArrayReg,  mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { 0,             0, rm8_r8, 0, 0, 0,            0,      0, true }, #opname "8AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
 { kX86 ## opname ## 8TR, kThreadReg, mem_use | IS_BINARY_OP   |           REG_USE1   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_r8, 0, 0, 0,            0,      0, true }, #opname "8TR", "fs:[!0d],!1r" }, \
 { kX86 ## opname ## 8RR, kRegReg,              IS_BINARY_OP   | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r8_rm8, 0, 0, 0,            0,      0, true }, #opname "8RR", "!0r,!1r" }, \
 { kX86 ## opname ## 8RM, kRegMem,    IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r8_rm8, 0, 0, 0,            0,      0, true }, #opname "8RM", "!0r,[!1r+!2d]" }, \
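
The trailing true/false field in these skeletons marks 8-bit register forms (an assumption based on the rm8_r8/r8_rm8 parameter names); the whitespace fix above normalizes it to "true }". The flag matters because byte-register operands interact with REX emission on x86-64; a minimal sketch of the underlying ISA rule (not ART code):

#include <cstdio>

// x86-64 byte-register rule: encodings 4..7 mean AH/CH/DH/BH without a REX
// prefix but SPL/BPL/SIL/DIL with one, and r8b..r15b always need REX, so an
// assembler must know when an operand is an 8-bit register form.
static bool ByteRegNeedsRex(int reg, bool want_low_byte_of_sp_bp_si_di) {
  if (reg >= 8) {
    return true;  // r8b..r15b
  }
  return want_low_byte_of_sp_bp_si_di && reg >= 4;  // spl/bpl/sil/dil
}

int main() {
  std::printf("%d %d\n", ByteRegNeedsRex(4, true), ByteRegNeedsRex(3, true));  // "1 0"
  return 0;
}
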
@@ -286,7 +286,7 @@
 
   { kX86Test32RR, kRegReg,             IS_BINARY_OP   | REG_USE01 | SETS_CCODES, { 0,     0, 0x85, 0, 0, 0, 0, 0, false }, "Test32RR", "!0r,!1r" },
   { kX86Test64RR, kRegReg,             IS_BINARY_OP   | REG_USE01 | SETS_CCODES, { REX_W, 0, 0x85, 0, 0, 0, 0, 0, false }, "Test64RR", "!0r,!1r" },
-  { kX86Test32RM, kRegMem,   IS_LOAD | IS_TERTIARY_OP | REG_USE0  | SETS_CCODES, { 0,     0, 0x85, 0, 0, 0, 0, 0, false }, "Test32RM", "!0r,[!1r+!1d]" },
+  { kX86Test32RM, kRegMem,   IS_LOAD | IS_TERTIARY_OP | REG_USE01 | SETS_CCODES, { 0,     0, 0x85, 0, 0, 0, 0, 0, false }, "Test32RM", "!0r,[!1r+!2d]" },
 
 #define UNARY_ENCODING_MAP(opname, modrm, is_store, sets_ccodes, \
                            reg, reg_kind, reg_flags, \
@@ -478,7 +478,7 @@
   { kX86MovsxdRM, kRegMem,      IS_LOAD | IS_TERTIARY_OP | REG_DEF0 | REG_USE1,  { REX_W, 0, 0x63, 0, 0, 0, 0, 0, false }, "MovsxdRM", "!0r,[!1r+!2d]" },
   { kX86MovsxdRA, kRegArray,    IS_LOAD | IS_QUIN_OP     | REG_DEF0 | REG_USE12, { REX_W, 0, 0x63, 0, 0, 0, 0, 0, false }, "MovsxdRA", "!0r,[!1r+!2r<<!3d+!4d]" },
 
-  { kX86Set8R, kRegCond,              IS_BINARY_OP   | REG_DEF0  | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0, true }, "Set8R", "!1c !0r" },
+  { kX86Set8R, kRegCond,   IS_BINARY_OP | REG_DEF0   | REG_USE0  | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0, true  }, "Set8R", "!1c !0r" },
   { kX86Set8M, kMemCond,   IS_STORE | IS_TERTIARY_OP | REG_USE0  | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0, false }, "Set8M", "!2c [!0r+!1d]" },
   { kX86Set8A, kArrayCond, IS_STORE | IS_QUIN_OP     | REG_USE01 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0, false }, "Set8A", "!4c [!0r+!1r<<!2d+!3d]" },
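
Adding REG_USE0 to Set8R reflects that SETcc writes only the low byte of its register, so the previous contents of the upper bits flow through; treating the register as use-and-def keeps the dataflow analysis honest. A small model of the semantics (not ART code):

#include <cstdint>
#include <cstdio>

int main() {
  // Models "sete al" with ZF set: only bits 0..7 of EAX are replaced.
  uint32_t eax = 0x12345600;
  bool zf = true;
  eax = (eax & ~UINT32_C(0xFF)) | static_cast<uint8_t>(zf);
  std::printf("0x%08X\n", eax);  // 0x12345601: bits 8..31 survived the write.
  return 0;
}
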
 
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 0a46f2e..266191a 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -65,7 +65,7 @@
 
   // Required for target - codegen helpers.
   bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
-                          RegLocation rl_dest, int lit);
+                          RegLocation rl_dest, int lit) OVERRIDE;
   bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
   LIR* CheckSuspendUsingLoad() OVERRIDE;
   RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
@@ -73,22 +73,17 @@
                     OpSize size, VolatileKind is_volatile) OVERRIDE;
   LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
                        OpSize size) OVERRIDE;
-  LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                           RegStorage r_dest, OpSize size);
   LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
   LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
   LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                      OpSize size, VolatileKind is_volatile) OVERRIDE;
   LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
                         OpSize size) OVERRIDE;
-  LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                            RegStorage r_src, OpSize size);
-  void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
-  void GenImplicitNullCheck(RegStorage reg, int opt_flags);
+  void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
+  void GenImplicitNullCheck(RegStorage reg, int opt_flags) OVERRIDE;
 
   // Required for target - register utilities.
   RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
-  RegStorage TargetReg32(SpecialTargetRegister reg);
   RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
     if (wide_kind == kWide) {
       if (cu_->target64) {
@@ -110,111 +105,78 @@
   RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
     return TargetReg(symbolic_reg, cu_->target64 ? kWide : kNotWide);
   }
-  RegStorage GetArgMappingToPhysicalReg(int arg_num);
-  RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
-  RegLocation GetReturnAlt();
-  RegLocation GetReturnWideAlt();
-  RegLocation LocCReturn();
-  RegLocation LocCReturnRef();
-  RegLocation LocCReturnDouble();
-  RegLocation LocCReturnFloat();
-  RegLocation LocCReturnWide();
+
+  RegStorage GetArgMappingToPhysicalReg(int arg_num) OVERRIDE;
+
+  RegLocation GetReturnAlt() OVERRIDE;
+  RegLocation GetReturnWideAlt() OVERRIDE;
+  RegLocation LocCReturn() OVERRIDE;
+  RegLocation LocCReturnRef() OVERRIDE;
+  RegLocation LocCReturnDouble() OVERRIDE;
+  RegLocation LocCReturnFloat() OVERRIDE;
+  RegLocation LocCReturnWide() OVERRIDE;
+
   ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
-  void AdjustSpillMask();
-  void ClobberCallerSave();
-  void FreeCallTemps();
-  void LockCallTemps();
-  void CompilerInitializeRegAlloc();
-  int VectorRegisterSize();
-  int NumReservableVectorRegisters(bool fp_used);
+  void AdjustSpillMask() OVERRIDE;
+  void ClobberCallerSave() OVERRIDE;
+  void FreeCallTemps() OVERRIDE;
+  void LockCallTemps() OVERRIDE;
+
+  void CompilerInitializeRegAlloc() OVERRIDE;
+  int VectorRegisterSize() OVERRIDE;
+  int NumReservableVectorRegisters(bool fp_used) OVERRIDE;
 
   // Required for target - miscellaneous.
-  void AssembleLIR();
-  int AssignInsnOffsets();
-  void AssignOffsets();
-  AssemblerStatus AssembleInstructions(CodeOffset start_addr);
+  void AssembleLIR() OVERRIDE;
   void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
   void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                 ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
-  const char* GetTargetInstFmt(int opcode);
-  const char* GetTargetInstName(int opcode);
-  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+  const char* GetTargetInstFmt(int opcode) OVERRIDE;
+  const char* GetTargetInstName(int opcode) OVERRIDE;
+  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) OVERRIDE;
   ResourceMask GetPCUseDefEncoding() const OVERRIDE;
-  uint64_t GetTargetInstFlags(int opcode);
+  uint64_t GetTargetInstFlags(int opcode) OVERRIDE;
   size_t GetInsnSize(LIR* lir) OVERRIDE;
-  bool IsUnconditionalBranch(LIR* lir);
+  bool IsUnconditionalBranch(LIR* lir) OVERRIDE;
 
   // Get the register class for load/store of a field.
   RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
 
   // Required for target - Dalvik-level generators.
-  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                         RegLocation rl_src2);
   void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
-                   RegLocation rl_dest, int scale);
+                   RegLocation rl_dest, int scale) OVERRIDE;
   void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
-                   RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
-  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                         RegLocation rl_src1, RegLocation rl_shift);
-  void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2);
-  void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2);
-  void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2);
+                   RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) OVERRIDE;
+
   void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                        RegLocation rl_src2);
+                        RegLocation rl_src2) OVERRIDE;
   void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                       RegLocation rl_src2);
-  void GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_double);
+                       RegLocation rl_src2) OVERRIDE;
   void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                RegLocation rl_src2);
-  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
-  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
-  bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
-  bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double);
-  bool GenInlinedSqrt(CallInfo* info);
+                RegLocation rl_src2) OVERRIDE;
+  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+
+  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) OVERRIDE;
+  bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) OVERRIDE;
+  bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) OVERRIDE;
+  bool GenInlinedSqrt(CallInfo* info) OVERRIDE;
   bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
   bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
-  bool GenInlinedPeek(CallInfo* info, OpSize size);
-  bool GenInlinedPoke(CallInfo* info, OpSize size);
+  bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
+  bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
   bool GenInlinedCharAt(CallInfo* info) OVERRIDE;
-  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
-  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
-  void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                 RegLocation rl_src2);
-  void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2);
-  void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2);
-  void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                     RegLocation rl_src2, bool is_div);
-  // TODO: collapse reg_lo, reg_hi
-  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
-  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
-  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-  void GenDivZeroCheckWide(RegStorage reg);
-  void GenArrayBoundsCheck(RegStorage index, RegStorage array_base, int32_t len_offset);
-  void GenArrayBoundsCheck(int32_t index, RegStorage array_base, int32_t len_offset);
-  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
-  void GenExitSequence();
-  void GenSpecialExitSequence();
-  void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
-  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
-  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
-  void GenSelect(BasicBlock* bb, MIR* mir);
-  void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
-                        int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                        int dest_reg_class) OVERRIDE;
-  bool GenMemBarrier(MemBarrierKind barrier_kind);
-  void GenMoveException(RegLocation rl_dest);
-  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
-                                     int first_bit, int second_bit);
-  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
-  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
-  void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
-  void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
-  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
+
+  // Long instructions.
+  void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                      RegLocation rl_src2) OVERRIDE;
+  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                         RegLocation rl_src2) OVERRIDE;
+  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                         RegLocation rl_src1, RegLocation rl_shift) OVERRIDE;
+  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) OVERRIDE;
+  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                      RegLocation rl_src1, RegLocation rl_shift) OVERRIDE;
 
   /*
    * @brief Generate a two address long operation with a constant value
@@ -224,6 +186,7 @@
    * @return success or not
    */
   bool GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
+
   /*
    * @brief Generate a three address long operation with a constant value
    * @param rl_dest location of result
@@ -234,7 +197,6 @@
    */
   bool GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
                       Instruction::Code op);
-
   /**
    * @brief Generate a long arithmetic operation.
    * @param rl_dest The destination.
@@ -262,6 +224,31 @@
    */
   virtual void GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
 
+
+  // TODO: collapse reg_lo, reg_hi
+  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div)
+      OVERRIDE;
+  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) OVERRIDE;
+  void GenDivZeroCheckWide(RegStorage reg) OVERRIDE;
+  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
+  void GenExitSequence() OVERRIDE;
+  void GenSpecialExitSequence() OVERRIDE;
+  void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
+  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
+  void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
+  void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+                        int32_t true_val, int32_t false_val, RegStorage rs_dest,
+                        int dest_reg_class) OVERRIDE;
+  bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
+  void GenMoveException(RegLocation rl_dest) OVERRIDE;
+  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+                                     int first_bit, int second_bit) OVERRIDE;
+  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+  void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+
   /**
    * @brief Implement instanceof a final class with x86 specific code.
    * @param use_declaring_class 'true' if we can use the class itself.
@@ -270,56 +257,39 @@
    * @param rl_src Object to be tested.
    */
   void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
-                          RegLocation rl_src);
-
-  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                      RegLocation rl_src1, RegLocation rl_shift);
+                          RegLocation rl_src) OVERRIDE;
 
   // Single operation generators.
-  LIR* OpUnconditionalBranch(LIR* target);
-  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
-  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
-  LIR* OpCondBranch(ConditionCode cc, LIR* target);
-  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
-  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
-  LIR* OpIT(ConditionCode cond, const char* guide);
-  void OpEndIT(LIR* it);
-  LIR* OpMem(OpKind op, RegStorage r_base, int disp);
-  LIR* OpPcRelLoad(RegStorage reg, LIR* target);
-  LIR* OpReg(OpKind op, RegStorage r_dest_src);
-  void OpRegCopy(RegStorage r_dest, RegStorage r_src);
-  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
-  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
-  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
-  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegLocation value);
-  LIR* OpMemReg(OpKind op, RegLocation rl_dest, int value);
-  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
-  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
-  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
-  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
-  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
-  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
-  LIR* OpTestSuspend(LIR* target);
-  LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset);
-  LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset);
-  LIR* OpVldm(RegStorage r_base, int count);
-  LIR* OpVstm(RegStorage r_base, int count);
-  void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
-  void OpRegCopyWide(RegStorage dest, RegStorage src);
-  void OpTlsCmp(ThreadOffset<4> offset, int val);
-  void OpTlsCmp(ThreadOffset<8> offset, int val);
+  LIR* OpUnconditionalBranch(LIR* target) OVERRIDE;
+  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) OVERRIDE;
+  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) OVERRIDE;
+  LIR* OpCondBranch(ConditionCode cc, LIR* target) OVERRIDE;
+  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) OVERRIDE;
+  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpIT(ConditionCode cond, const char* guide) OVERRIDE;
+  void OpEndIT(LIR* it) OVERRIDE;
+  LIR* OpMem(OpKind op, RegStorage r_base, int disp) OVERRIDE;
+  LIR* OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
+  LIR* OpReg(OpKind op, RegStorage r_dest_src) OVERRIDE;
+  void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) OVERRIDE;
+  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) OVERRIDE;
+  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) OVERRIDE;
+  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) OVERRIDE;
+  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) OVERRIDE;
+  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) OVERRIDE;
+  LIR* OpTestSuspend(LIR* target) OVERRIDE;
+  LIR* OpVldm(RegStorage r_base, int count) OVERRIDE;
+  LIR* OpVstm(RegStorage r_base, int count) OVERRIDE;
+  void OpRegCopyWide(RegStorage dest, RegStorage src) OVERRIDE;
+  bool GenInlinedCurrentThread(CallInfo* info) OVERRIDE;
 
-  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
-  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
-  void SpillCoreRegs();
-  void UnSpillCoreRegs();
-  void UnSpillFPRegs();
-  void SpillFPRegs();
-  static const X86EncodingMap EncodingMap[kX86Last];
-  bool InexpensiveConstantInt(int32_t value);
-  bool InexpensiveConstantFloat(int32_t value);
-  bool InexpensiveConstantLong(int64_t value);
-  bool InexpensiveConstantDouble(int64_t value);
+  bool InexpensiveConstantInt(int32_t value) OVERRIDE;
+  bool InexpensiveConstantFloat(int32_t value) OVERRIDE;
+  bool InexpensiveConstantLong(int64_t value) OVERRIDE;
+  bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
 
   /*
    * @brief Should try to optimize for two address instructions?
@@ -335,13 +305,7 @@
    * @param rl_rhs Right hand operand.
    */
   void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_lhs,
-                     RegLocation rl_rhs);
-
-  /*
-   * @brief Dump a RegLocation using printf
-   * @param loc Register location to dump
-   */
-  static void DumpRegLocation(RegLocation loc);
+                     RegLocation rl_rhs) OVERRIDE;
 
   /*
    * @brief Load the Method* of a dex method into the register.
@@ -351,7 +315,7 @@
    * @note register will be passed to TargetReg to get physical register.
    */
   void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
-                         SpecialTargetRegister symbolic_reg);
+                         SpecialTargetRegister symbolic_reg) OVERRIDE;
 
   /*
    * @brief Load the Class* of a Dex Class type into the register.
@@ -359,23 +323,23 @@
    * @param register that will contain the code address.
    * @note register will be passed to TargetReg to get physical register.
    */
-  void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg);
+  void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) OVERRIDE;
 
-  void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
+  void FlushIns(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
 
   int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
                            NextCallInsn next_call_insn,
                            const MethodReference& target_method,
                            uint32_t vtable_idx,
                            uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
-                           bool skip_this);
+                           bool skip_this) OVERRIDE;
 
   int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
                          NextCallInsn next_call_insn,
                          const MethodReference& target_method,
                          uint32_t vtable_idx,
                          uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
-                         bool skip_this);
+                         bool skip_this) OVERRIDE;
 
   /*
    * @brief Generate a relative call to the method that will be patched at link time.
@@ -388,7 +352,7 @@
   /*
    * @brief Handle x86 specific literals
    */
-  void InstallLiteralPools();
+  void InstallLiteralPools() OVERRIDE;
 
   /*
    * @brief Generate the debug_frame CFI information.
@@ -400,11 +364,12 @@
    * @brief Generate the debug_frame FDE information.
    * @returns pointer to vector containing CFE information
    */
-  std::vector<uint8_t>* ReturnCallFrameInformation();
+  std::vector<uint8_t>* ReturnCallFrameInformation() OVERRIDE;
 
   LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
 
  protected:
+  RegStorage TargetReg32(SpecialTargetRegister reg);
   // Casting of RegStorage
   RegStorage As32BitReg(RegStorage reg) {
     DCHECK(!reg.IsPair());
@@ -442,6 +407,17 @@
     return ret_val;
   }
 
+  LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
+                           RegStorage r_dest, OpSize size);
+  LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
+                            RegStorage r_src, OpSize size);
+
+  RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
+
+  int AssignInsnOffsets();
+  void AssignOffsets();
+  AssemblerStatus AssembleInstructions(CodeOffset start_addr);
+
   size_t ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_index,
                      int32_t raw_base, int32_t displacement);
   void CheckValidByteRegister(const X86EncodingMap* entry, int32_t raw_reg);
@@ -528,6 +504,9 @@
    * @returns true if a register is byte addressable.
    */
   bool IsByteRegister(RegStorage reg);
+
+  void GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src, int64_t imm, bool is_div);
+
   bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
 
   /*
@@ -736,8 +715,9 @@
    * @param divisor divisor number for calculation
    * @param magic hold calculated magic number
    * @param shift hold calculated shift
+   * @param is_long 'true' if divisor is jlong, 'false' for jint.
    */
-  void CalculateMagicAndShift(int divisor, int& magic, int& shift);
+  void CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& shift, bool is_long);
 
   /*
    * @brief Generate an integer div or rem operation.
@@ -800,6 +780,8 @@
   LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                          int offset, int check_value, LIR* target, LIR** compare);
 
+  void GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_double);
+
   /*
    * Can this operation be using core registers without temporaries?
    * @param rl_lhs Left hand operand.
@@ -816,6 +798,36 @@
    */
   virtual void GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_double);
 
+  void GenArrayBoundsCheck(RegStorage index, RegStorage array_base, int32_t len_offset);
+  void GenArrayBoundsCheck(int32_t index, RegStorage array_base, int32_t len_offset);
+
+  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
+  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegLocation value);
+  LIR* OpMemReg(OpKind op, RegLocation rl_dest, int value);
+  LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset);
+  LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset);
+  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
+  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
+  void OpTlsCmp(ThreadOffset<4> offset, int val);
+  void OpTlsCmp(ThreadOffset<8> offset, int val);
+
+  void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
+
+  // Try to do a long multiplication where rl_src2 is a constant. This simplified setup might fail,
+  // in which case false will be returned.
+  bool GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val);
+  void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                  RegLocation rl_src2);
+  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
+  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+  void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+                     RegLocation rl_src2, bool is_div);
+
+  void SpillCoreRegs();
+  void UnSpillCoreRegs();
+  void UnSpillFPRegs();
+  void SpillFPRegs();
+
   /*
    * @brief Perform MIR analysis before compiling method.
    * @note Invokes Mir2LiR::Materialize after analysis.
@@ -938,6 +950,14 @@
     return true;  // xmm registers have 64b views even on x86.
   }
 
+  /*
+   * @brief Dump a RegLocation using printf
+   * @param loc Register location to dump
+   */
+  static void DumpRegLocation(RegLocation loc);
+
+  static const X86EncodingMap EncodingMap[kX86Last];
+
  private:
   // The number of vector registers [0..N] reserved by a call to ReserveVectorRegisters
   int num_reserved_vector_regs_;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index b9abdbf..fdc46e2 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -513,7 +513,7 @@
   OpCondBranch(ccode, taken);
 }
 
-void X86Mir2Lir::CalculateMagicAndShift(int divisor, int& magic, int& shift) {
+void X86Mir2Lir::CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& shift, bool is_long) {
   // It does not make sense to calculate magic and shift for zero divisor.
   DCHECK_NE(divisor, 0);
 
@@ -525,8 +525,8 @@
    * Let nc be the most negative value of numerator(n) such that nc = kd + 1,
    * where divisor(d) <= -2.
    * Thus nc can be calculated like:
-   * nc = 2^31 + 2^31 % d - 1, where d >= 2
-   * nc = -2^31 + (2^31 + 1) % d, where d >= 2.
+   * nc = exp - exp % d - 1, where d >= 2 and exp = 2^31 for int or 2^63 for long
+   * nc = -exp + (exp + 1) % d, where d <= -2 and exp = 2^31 for int or 2^63 for long
    *
    * So the shift p is the smallest p satisfying
    * 2^p > nc * (d - 2^p % d), where d >= 2
@@ -536,27 +536,28 @@
    * M = (2^p + d - 2^p % d) / d, where d >= 2
    * M = (2^p - d - 2^p % d) / d, where d <= -2.
    *
-   * Notice that p is always bigger than or equal to 32, so we just return 32-p as
+   * Notice that p is always bigger than or equal to 32/64, so we just return p-32/p-64 as
    * the shift number S.
    */
 
-  int32_t p = 31;
-  const uint32_t two31 = 0x80000000U;
+  int64_t p = (is_long) ? 63 : 31;
+  const uint64_t exp = (is_long) ? 0x8000000000000000ULL : 0x80000000U;
 
   // Initialize the computations.
-  uint32_t abs_d = (divisor >= 0) ? divisor : -divisor;
-  uint32_t tmp = two31 + (static_cast<uint32_t>(divisor) >> 31);
-  uint32_t abs_nc = tmp - 1 - tmp % abs_d;
-  uint32_t quotient1 = two31 / abs_nc;
-  uint32_t remainder1 = two31 % abs_nc;
-  uint32_t quotient2 = two31 / abs_d;
-  uint32_t remainder2 = two31 % abs_d;
+  uint64_t abs_d = (divisor >= 0) ? divisor : -divisor;
+  uint64_t tmp = exp + ((is_long) ? static_cast<uint64_t>(divisor) >> 63 :
+                                    static_cast<uint32_t>(divisor) >> 31);
+  uint64_t abs_nc = tmp - 1 - tmp % abs_d;
+  uint64_t quotient1 = exp / abs_nc;
+  uint64_t remainder1 = exp % abs_nc;
+  uint64_t quotient2 = exp / abs_d;
+  uint64_t remainder2 = exp % abs_d;
 
   /*
    * To avoid handling both positive and negative divisor, Hacker's Delight
    * introduces a method to handle these 2 cases together to avoid duplication.
    */
-  uint32_t delta;
+  uint64_t delta;
   do {
     p++;
     quotient1 = 2 * quotient1;
@@ -575,7 +576,12 @@
   } while (quotient1 < delta || (quotient1 == delta && remainder1 == 0));
 
   magic = (divisor > 0) ? (quotient2 + 1) : (-quotient2 - 1);
-  shift = p - 32;
+
+  if (!is_long) {
+    magic = static_cast<int>(magic);
+  }
+
+  shift = (is_long) ? p - 64 : p - 32;
 }
 
 RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) {
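
As a concrete instance of CalculateMagicAndShift: for a 32-bit divisor of 7 it yields magic = 0x92492493 (negative as an int32_t) and shift = 2, the classic Hacker's Delight constants, and the five code-generation steps can be checked in plain C++ (standalone sketch, not ART code; assumes '>>' on a negative int is arithmetic, as on all mainstream compilers):

#include <cstdint>
#include <cstdio>

static int32_t DivBy7(int32_t n) {
  const int32_t magic = static_cast<int32_t>(0x92492493U);
  int32_t q = static_cast<int32_t>((static_cast<int64_t>(magic) * n) >> 32);  // 1: EDX
  q += n;                                       // 2: imm > 0 and magic < 0, add numerator
  q >>= 2;                                      // 3: SAR by the shift S
  q += static_cast<int32_t>(static_cast<uint32_t>(q) >> 31);  // 4: add 1 if negative
  return q;                                     // 5: the quotient
}

int main() {
  for (int32_t n : {21, -21, 100, -100, 6}) {
    std::printf("%d / 7 = %d (libc says %d)\n", n, DivBy7(n), n / 7);
  }
  return 0;
}
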
@@ -586,52 +592,57 @@
 RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
                                      int imm, bool is_div) {
   // Use a multiply (and fixup) to perform an int div/rem by a constant.
+  RegLocation rl_result;
 
-  // We have to use fixed registers, so flush all the temps.
-  FlushAllRegs();
-  LockCallTemps();  // Prepare for explicit register usage.
-
-  // Assume that the result will be in EDX.
-  RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_r2, INVALID_SREG, INVALID_SREG};
-
-  // handle div/rem by 1 special case.
   if (imm == 1) {
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
     if (is_div) {
       // x / 1 == x.
-      StoreValue(rl_result, rl_src);
+      LoadValueDirectFixed(rl_src, rl_result.reg);
     } else {
       // x % 1 == 0.
-      LoadConstantNoClobber(rs_r0, 0);
-      // For this case, return the result in EAX.
-      rl_result.reg.SetReg(r0);
+      LoadConstantNoClobber(rl_result.reg, 0);
     }
   } else if (imm == -1) {  // handle 0x80000000 / -1 special case.
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
     if (is_div) {
-      LIR *minint_branch = 0;
-      LoadValueDirectFixed(rl_src, rs_r0);
-      OpRegImm(kOpCmp, rs_r0, 0x80000000);
-      minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
+      LoadValueDirectFixed(rl_src, rl_result.reg);
+      OpRegImm(kOpCmp, rl_result.reg, 0x80000000);
+      LIR *minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
 
       // for x != MIN_INT, x / -1 == -x.
-      NewLIR1(kX86Neg32R, r0);
+      NewLIR1(kX86Neg32R, rl_result.reg.GetReg());
 
-      LIR* branch_around = NewLIR1(kX86Jmp8, 0);
-      // The target for cmp/jmp above.
-      minint_branch->target = NewLIR0(kPseudoTargetLabel);
       // The result register already contains the right value (0x80000000).
-      branch_around->target = NewLIR0(kPseudoTargetLabel);
+      minint_branch->target = NewLIR0(kPseudoTargetLabel);
     } else {
       // x % -1 == 0.
-      LoadConstantNoClobber(rs_r0, 0);
+      LoadConstantNoClobber(rl_result.reg, 0);
     }
-    // For this case, return the result in EAX.
-    rl_result.reg.SetReg(r0);
+  } else if (is_div && IsPowerOfTwo(std::abs(imm))) {
+    // Division using shifting.
+    rl_src = LoadValue(rl_src, kCoreReg);
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    if (IsSameReg(rl_result.reg, rl_src.reg)) {
+      RegStorage rs_temp = AllocTypedTemp(false, kCoreReg);
+      rl_result.reg.SetReg(rs_temp.GetReg());
+    }
+    NewLIR3(kX86Lea32RM, rl_result.reg.GetReg(), rl_src.reg.GetReg(), std::abs(imm) - 1);
+    NewLIR2(kX86Test32RR, rl_src.reg.GetReg(), rl_src.reg.GetReg());
+    OpCondRegReg(kOpCmov, kCondPl, rl_result.reg, rl_src.reg);
+    int shift_amount = LowestSetBit(imm);
+    OpRegImm(kOpAsr, rl_result.reg, shift_amount);
+    if (imm < 0) {
+      OpReg(kOpNeg, rl_result.reg);
+    }
   } else {
     CHECK(imm <= -2 || imm >= 2);
+
     // Use H. S. Warren's Hacker's Delight, Chapter 10, and
     // T. Granlund, P. L. Montgomery's "Division by invariant integers using multiplication".
-    int magic, shift;
-    CalculateMagicAndShift(imm, magic, shift);
+    int64_t magic;
+    int shift;
+    CalculateMagicAndShift(static_cast<int64_t>(imm), magic, shift, false /* is_long */);
 
     /*
      * For imm >= 2,
@@ -649,18 +660,22 @@
      * 5. Thus, EDX is the quotient
      */
 
+    FlushReg(rs_r0);
+    Clobber(rs_r0);
+    LockTemp(rs_r0);
+    FlushReg(rs_r2);
+    Clobber(rs_r2);
+    LockTemp(rs_r2);
+
+    // Assume that the result will be in EDX.
+    rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_r2, INVALID_SREG, INVALID_SREG};
+
     // Numerator into EAX.
     RegStorage numerator_reg;
     if (!is_div || (imm > 0 && magic < 0) || (imm < 0 && magic > 0)) {
       // We will need the value later.
-      if (rl_src.location == kLocPhysReg) {
-        // We can use it directly.
-        DCHECK(rl_src.reg.GetReg() != rs_r0.GetReg() && rl_src.reg.GetReg() != rs_r2.GetReg());
-        numerator_reg = rl_src.reg;
-      } else {
-        numerator_reg = rs_r1;
-        LoadValueDirectFixed(rl_src, numerator_reg);
-      }
+      rl_src = LoadValue(rl_src, kCoreReg);
+      numerator_reg = rl_src.reg;
       OpRegCopy(rs_r0, numerator_reg);
     } else {
       // Only need this once.  Just put it into EAX.
@@ -1268,91 +1283,113 @@
   }
 }
 
-void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
+void X86Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                                RegLocation rl_src2) {
+  if (!cu_->target64) {
+    // Some long ops on 32-bit x86 fall back to the generic Mir2Lir implementation.
+    switch (opcode) {
+      case Instruction::NOT_LONG:
+      case Instruction::DIV_LONG:
+      case Instruction::DIV_LONG_2ADDR:
+      case Instruction::REM_LONG:
+      case Instruction::REM_LONG_2ADDR:
+        Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+        return;
+
+      default:
+        // Everything else we can handle.
+        break;
+    }
+  }
+
+  switch (opcode) {
+    case Instruction::NOT_LONG:
+      GenNotLong(rl_dest, rl_src2);
+      return;
+
+    case Instruction::ADD_LONG:
+    case Instruction::ADD_LONG_2ADDR:
+      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
+      return;
+
+    case Instruction::SUB_LONG:
+    case Instruction::SUB_LONG_2ADDR:
+      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, false);
+      return;
+
+    case Instruction::MUL_LONG:
+    case Instruction::MUL_LONG_2ADDR:
+      GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
+      return;
+
+    case Instruction::DIV_LONG:
+    case Instruction::DIV_LONG_2ADDR:
+      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
+      return;
+
+    case Instruction::REM_LONG:
+    case Instruction::REM_LONG_2ADDR:
+      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
+      return;
+
+    case Instruction::AND_LONG_2ADDR:
+    case Instruction::AND_LONG:
+      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
+      return;
+
+    case Instruction::OR_LONG:
+    case Instruction::OR_LONG_2ADDR:
+      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
+      return;
+
+    case Instruction::XOR_LONG:
+    case Instruction::XOR_LONG_2ADDR:
+      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
+      return;
+
+    case Instruction::NEG_LONG:
+      GenNegLong(rl_dest, rl_src2);
+      return;
+
+    default:
+      LOG(FATAL) << "Invalid long arith op";
+      return;
+  }
+}
+
+bool X86Mir2Lir::GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val) {
   // All memory accesses below reference dalvik regs.
   ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
 
-  if (cu_->target64) {
-    if (rl_src1.is_const) {
-      std::swap(rl_src1, rl_src2);
-    }
-    // Are we multiplying by a constant?
-    if (rl_src2.is_const) {
-      int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-      if (val == 0) {
-        RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-        OpRegReg(kOpXor, rl_result.reg, rl_result.reg);
-        StoreValueWide(rl_dest, rl_result);
-        return;
-      } else if (val == 1) {
-        StoreValueWide(rl_dest, rl_src1);
-        return;
-      } else if (val == 2) {
-        GenAddLong(Instruction::ADD_LONG, rl_dest, rl_src1, rl_src1);
-        return;
-      } else if (IsPowerOfTwo(val)) {
-        int shift_amount = LowestSetBit(val);
-        if (!BadOverlap(rl_src1, rl_dest)) {
-          rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-          RegLocation rl_result = GenShiftImmOpLong(Instruction::SHL_LONG, rl_dest,
-                                                    rl_src1, shift_amount);
-          StoreValueWide(rl_dest, rl_result);
-          return;
-        }
-      }
-    }
-    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-    rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  if (val == 0) {
     RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
-        rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
-      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
-    } else if (rl_result.reg.GetReg() != rl_src1.reg.GetReg() &&
-               rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
-      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
-    } else if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
-               rl_result.reg.GetReg() != rl_src2.reg.GetReg()) {
-      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+    if (cu_->target64) {
+      OpRegReg(kOpXor, rl_result.reg, rl_result.reg);
     } else {
-      OpRegCopy(rl_result.reg, rl_src1.reg);
-      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
-    }
-    StoreValueWide(rl_dest, rl_result);
-    return;
-  }
-
-  if (rl_src1.is_const) {
-    std::swap(rl_src1, rl_src2);
-  }
-  // Are we multiplying by a constant?
-  if (rl_src2.is_const) {
-    // Do special compare/branch against simple const operand
-    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-    if (val == 0) {
-      RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
       OpRegReg(kOpXor, rl_result.reg.GetLow(), rl_result.reg.GetLow());
       OpRegReg(kOpXor, rl_result.reg.GetHigh(), rl_result.reg.GetHigh());
-      StoreValueWide(rl_dest, rl_result);
-      return;
-    } else if (val == 1) {
-      StoreValueWide(rl_dest, rl_src1);
-      return;
-    } else if (val == 2) {
-      GenAddLong(Instruction::ADD_LONG, rl_dest, rl_src1, rl_src1);
-      return;
-    } else if (IsPowerOfTwo(val)) {
-      int shift_amount = LowestSetBit(val);
-      if (!BadOverlap(rl_src1, rl_dest)) {
-        rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-        RegLocation rl_result = GenShiftImmOpLong(Instruction::SHL_LONG, rl_dest,
-                                                  rl_src1, shift_amount);
-        StoreValueWide(rl_dest, rl_result);
-        return;
-      }
     }
+    StoreValueWide(rl_dest, rl_result);
+    return true;
+  } else if (val == 1) {
+    StoreValueWide(rl_dest, rl_src1);
+    return true;
+  } else if (val == 2) {
+    GenArithOpLong(Instruction::ADD_LONG, rl_dest, rl_src1, rl_src1);
+    return true;
+  } else if (IsPowerOfTwo(val)) {
+    int shift_amount = LowestSetBit(val);
+    if (!BadOverlap(rl_src1, rl_dest)) {
+      rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+      RegLocation rl_result = GenShiftImmOpLong(Instruction::SHL_LONG, rl_dest, rl_src1,
+                                                shift_amount);
+      StoreValueWide(rl_dest, rl_result);
+      return true;
+    }
+  }
 
-    // Okay, just bite the bullet and do it.
+  // Okay, on 32b just bite the bullet and do it, still better than the general case.
+  if (!cu_->target64) {
     int32_t val_lo = Low32Bits(val);
     int32_t val_hi = High32Bits(val);
     FlushAllRegs();
@@ -1393,10 +1430,48 @@
     RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
                              RegStorage::MakeRegPair(rs_r0, rs_r2), INVALID_SREG, INVALID_SREG};
     StoreValueWide(rl_dest, rl_result);
+    return true;
+  }
+  return false;
+}
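
The special cases GenMulLongConst peels off are ordinary algebraic identities; a quick standalone check (not ART code):

#include <cassert>
#include <cstdint>

int main() {
  int64_t x = -123456789012345LL;
  assert(x * 0 == 0);           // xor of the result register(s)
  assert(x * 1 == x);           // plain copy
  assert(x * 2 == x + x);       // ADD_LONG
  assert(x * 16 == (x << 4));   // SHL_LONG by LowestSetBit(16) == 4
  return 0;
}
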
+
+void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) {
+  if (rl_src1.is_const) {
+    std::swap(rl_src1, rl_src2);
+  }
+
+  if (rl_src2.is_const) {
+    if (GenMulLongConst(rl_dest, rl_src1, mir_graph_->ConstantValueWide(rl_src2))) {
+      return;
+    }
+  }
+
+  // All memory accesses below reference dalvik regs.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+
+  if (cu_->target64) {
+    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+    rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+    RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+    if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
+        rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
+      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+    } else if (rl_result.reg.GetReg() != rl_src1.reg.GetReg() &&
+               rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
+      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
+    } else if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
+               rl_result.reg.GetReg() != rl_src2.reg.GetReg()) {
+      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+    } else {
+      OpRegCopy(rl_result.reg, rl_src1.reg);
+      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+    }
+    StoreValueWide(rl_dest, rl_result);
     return;
   }
 
-  // Nope.  Do it the hard way
+  // Not multiplying by a constant. Do it the hard way.
   // Check for V*V.  We can eliminate a multiply in that case, as 2L*1H == 2H*1L.
   bool is_square = mir_graph_->SRegToVReg(rl_src1.s_reg_low) ==
                    mir_graph_->SRegToVReg(rl_src2.s_reg_low);
@@ -1666,31 +1741,6 @@
   StoreFinalValueWide(rl_dest, rl_src1);
 }
 
-void X86Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
-                            RegLocation rl_src1, RegLocation rl_src2) {
-  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-}
-
-void X86Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
-                            RegLocation rl_src1, RegLocation rl_src2) {
-  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, false);
-}
-
-void X86Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest,
-                            RegLocation rl_src1, RegLocation rl_src2) {
-  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-}
-
-void X86Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest,
-                           RegLocation rl_src1, RegLocation rl_src2) {
-  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-}
-
-void X86Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest,
-                            RegLocation rl_src1, RegLocation rl_src2) {
-  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-}
-
 void X86Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
   if (cu_->target64) {
     rl_src = LoadValueWide(rl_src, kCoreReg);
@@ -1704,13 +1754,191 @@
   }
 }
 
+void X86Mir2Lir::GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src,
+                                  int64_t imm, bool is_div) {
+  if (imm == 0) {
+    GenDivZeroException();
+  } else if (imm == 1) {
+    if (is_div) {
+      // x / 1 == x.
+      StoreValueWide(rl_dest, rl_src);
+    } else {
+      // x % 1 == 0.
+      RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+      LoadConstantWide(rl_result.reg, 0);
+      StoreValueWide(rl_dest, rl_result);
+    }
+  } else if (imm == -1) {  // handle 0x8000000000000000 / -1 special case.
+    if (is_div) {
+      rl_src = LoadValueWide(rl_src, kCoreReg);
+      RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+      RegStorage rs_temp = AllocTempWide();
+
+      OpRegCopy(rl_result.reg, rl_src.reg);
+      LoadConstantWide(rs_temp, 0x8000000000000000);
+
+      // If x == MIN_LONG, return MIN_LONG.
+      OpRegReg(kOpCmp, rl_src.reg, rs_temp);
+      LIR *minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
+
+      // For x != MIN_LONG, x / -1 == -x.
+      OpReg(kOpNeg, rl_result.reg);
+
+      minint_branch->target = NewLIR0(kPseudoTargetLabel);
+      FreeTemp(rs_temp);
+      StoreValueWide(rl_dest, rl_result);
+    } else {
+      // x % -1 == 0.
+      RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+      LoadConstantWide(rl_result.reg, 0);
+      StoreValueWide(rl_dest, rl_result);
+    }
+  } else if (is_div && IsPowerOfTwo(std::abs(imm))) {
+    // Division using shifting.
+    rl_src = LoadValueWide(rl_src, kCoreReg);
+    RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+    if (IsSameReg(rl_result.reg, rl_src.reg)) {
+      RegStorage rs_temp = AllocTypedTempWide(false, kCoreReg);
+      rl_result.reg.SetReg(rs_temp.GetReg());
+    }
+    LoadConstantWide(rl_result.reg, std::abs(imm) - 1);
+    OpRegReg(kOpAdd, rl_result.reg, rl_src.reg);
+    NewLIR2(kX86Test64RR, rl_src.reg.GetReg(), rl_src.reg.GetReg());
+    OpCondRegReg(kOpCmov, kCondPl, rl_result.reg, rl_src.reg);
+    int shift_amount = LowestSetBit(imm);
+    OpRegImm(kOpAsr, rl_result.reg, shift_amount);
+    if (imm < 0) {
+      OpReg(kOpNeg, rl_result.reg);
+    }
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    CHECK(imm <= -2 || imm >= 2);
+
+    FlushReg(rs_r0q);
+    Clobber(rs_r0q);
+    LockTemp(rs_r0q);
+    FlushReg(rs_r2q);
+    Clobber(rs_r2q);
+    LockTemp(rs_r2q);
+
+    RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, rs_r2q, INVALID_SREG, INVALID_SREG};
+
+    // Use H. S. Warren's Hacker's Delight, Chapter 10, and
+    // T. Granlund, P. L. Montgomery's "Division by invariant integers using multiplication".
+    int64_t magic;
+    int shift;
+    CalculateMagicAndShift(imm, magic, shift, true /* is_long */);
+
+    /*
+     * For imm >= 2,
+     *     int(n/imm) = floor(n/imm) = floor(M*n/2^S), while n > 0
+     *     int(n/imm) = ceil(n/imm) = floor(M*n/2^S) + 1, while n < 0.
+     * For imm <= -2,
+     *     int(n/imm) = ceil(n/imm) = floor(M*n/2^S) + 1, while n > 0
+     *     int(n/imm) = floor(n/imm) = floor(M*n/2^S), while n < 0.
+     * We implement this algorithm in the following way:
+     * 1. multiply magic number m and numerator n, get the higher 64-bit result in RDX
+     * 2. if imm > 0 and magic < 0, add numerator to RDX
+     *    if imm < 0 and magic > 0, sub numerator from RDX
+     * 3. if S != 0, SAR S bits for RDX
+     * 4. add 1 to RDX if RDX < 0
+     * 5. Thus, RDX is the quotient
+     */
+
+    // Numerator into RAX.
+    RegStorage numerator_reg;
+    if (!is_div || (imm > 0 && magic < 0) || (imm < 0 && magic > 0)) {
+      // We will need the value later.
+      rl_src = LoadValueWide(rl_src, kCoreReg);
+      numerator_reg = rl_src.reg;
+      OpRegCopyWide(rs_r0q, numerator_reg);
+    } else {
+      // Only need this once.  Just put it into RAX.
+      LoadValueDirectWideFixed(rl_src, rs_r0q);
+    }
+
+    // RDX = magic.
+    LoadConstantWide(rs_r2q, magic);
+
+    // RDX:RAX = magic * numerator.
+    NewLIR1(kX86Imul64DaR, rs_r2q.GetReg());
+
+    if (imm > 0 && magic < 0) {
+      // Add numerator to RDX.
+      DCHECK(numerator_reg.Valid());
+      OpRegReg(kOpAdd, rs_r2q, numerator_reg);
+    } else if (imm < 0 && magic > 0) {
+      DCHECK(numerator_reg.Valid());
+      OpRegReg(kOpSub, rs_r2q, numerator_reg);
+    }
+
+    // Do we need the shift?
+    if (shift != 0) {
+      // Shift RDX by 'shift' bits.
+      OpRegImm(kOpAsr, rs_r2q, shift);
+    }
+
+    // Move RDX to RAX.
+    OpRegCopyWide(rs_r0q, rs_r2q);
+
+    // Move sign bit to bit 0, zeroing the rest.
+    OpRegImm(kOpLsr, rs_r2q, 63);
+
+    // RDX = RDX + RAX.
+    OpRegReg(kOpAdd, rs_r2q, rs_r0q);
+
+    // Quotient is in RDX.
+    if (!is_div) {
+      // We need to compute the remainder.
+      // Remainder is numerator - (quotient * imm).
+      DCHECK(numerator_reg.Valid());
+      OpRegCopyWide(rs_r0q, numerator_reg);
+
+      // Imul doesn't support 64-bit immediates.
+      if (imm > std::numeric_limits<int32_t>::max() ||
+          imm < std::numeric_limits<int32_t>::min()) {
+        RegStorage rs_temp = AllocTempWide();
+        LoadConstantWide(rs_temp, imm);
+
+        // RDX = quotient * imm.
+        NewLIR2(kX86Imul64RR, rs_r2q.GetReg(), rs_temp.GetReg());
+
+        FreeTemp(rs_temp);
+      } else {
+        // RDX = quotient * imm.
+        int short_imm = static_cast<int>(imm);
+        NewLIR3(kX86Imul64RRI, rs_r2q.GetReg(), rs_r2q.GetReg(), short_imm);
+      }
+
+      // RAX -= RDX; RAX now holds the remainder.
+      OpRegReg(kOpSub, rs_r0q, rs_r2q);
+
+      // Store result.
+      OpRegCopyWide(rl_result.reg, rs_r0q);
+    } else {
+      // Store result.
+      OpRegCopyWide(rl_result.reg, rs_r2q);
+    }
+    StoreValueWide(rl_dest, rl_result);
+    FreeTemp(rs_r0q);
+    FreeTemp(rs_r2q);
+  }
+}
+
 void X86Mir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2, bool is_div) {
+                               RegLocation rl_src2, bool is_div) {
   if (!cu_->target64) {
     LOG(FATAL) << "Unexpected use GenDivRemLong()";
     return;
   }
 
+  if (rl_src2.is_const) {
+    DCHECK(rl_src2.wide);
+    int64_t imm = mir_graph_->ConstantValueWide(rl_src2);
+    GenDivRemLongLit(rl_dest, rl_src1, imm, is_div);
+    return;
+  }
+
   // We have to use fixed registers, so flush all the temps.
   FlushAllRegs();
   LockCallTemps();  // Prepare for explicit register usage.
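
The two constant-division strategies in GenDivRemLongLit above are easy to sanity-check on the host. The following is an illustrative standalone sketch, not the ART code: it assumes GCC/Clang __int128 and __builtin_ctzll, assumes >> of a negative value is an arithmetic shift (true on mainstream compilers), and hard-codes the well-known Granlund/Montgomery pair (magic 0x4924924924924925, shift 1) for imm = 7 in place of CalculateMagicAndShift.

#include <cstdint>
#include <cstdio>

// Shift path: |imm| is a power of two. Bias negative dividends by |imm| - 1
// so the arithmetic shift rounds toward zero; the kCondPl cmov keeps n
// itself when n >= 0. __builtin_ctzll stands in for LowestSetBit.
int64_t DivByPowerOfTwo(int64_t n, int64_t imm) {
  int64_t abs_imm = imm < 0 ? -imm : imm;
  int64_t src = (n >= 0) ? n : n + (abs_imm - 1);
  int64_t q = src >> __builtin_ctzll(abs_imm);
  return imm < 0 ? -q : q;
}

// Magic path, following steps 1-5 of the comment in GenDivRemLongLit.
int64_t DivByMagic(int64_t n, int64_t magic, int shift, int64_t imm) {
  // 1. RDX = upper 64 bits of magic * n.
  int64_t rdx = static_cast<int64_t>((static_cast<__int128>(magic) * n) >> 64);
  // 2. Correct for the sign of the magic constant.
  if (imm > 0 && magic < 0) rdx += n;
  if (imm < 0 && magic > 0) rdx -= n;
  // 3. Arithmetic shift right by 'shift'.
  rdx >>= shift;
  // 4. Add the sign bit (the LSR-by-63-then-ADD step) to round toward zero.
  rdx += static_cast<uint64_t>(rdx) >> 63;
  // 5. RDX is the quotient.
  return rdx;
}

int main() {
  const int64_t tests[] = {-100, -8, -7, -1, 0, 1, 6, 7, 100};
  for (int64_t n : tests) {
    int64_t q4 = DivByPowerOfTwo(n, 4);
    int64_t q7 = DivByMagic(n, 0x4924924924924925LL, 1, 7);
    printf("%4lld: /4 -> %4lld (%4lld), /7 -> %4lld (%4lld)\n",
           static_cast<long long>(n),
           static_cast<long long>(q4), static_cast<long long>(n / 4),
           static_cast<long long>(q7), static_cast<long long>(n / 7));
  }
  return 0;
}

Both functions agree with plain C++ division (which also truncates toward zero) on the sampled values.
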
@@ -1734,7 +1962,7 @@
   // RHS is -1.
   LoadConstantWide(rs_r6q, 0x8000000000000000);
   NewLIR2(kX86Cmp64RR, rs_r0q.GetReg(), rs_r6q.GetReg());
-  LIR * minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
+  LIR *minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
 
   // In 0x8000000000000000/-1 case.
   if (!is_div) {
@@ -2021,7 +2249,7 @@
   } else if (shift_amount == 1 &&
             (opcode ==  Instruction::SHL_LONG || opcode == Instruction::SHL_LONG_2ADDR)) {
     // Need to handle this here to avoid calling StoreValueWide twice.
-    GenAddLong(Instruction::ADD_LONG, rl_dest, rl_src, rl_src);
+    GenArithOpLong(Instruction::ADD_LONG, rl_dest, rl_src, rl_src);
     return;
   }
   if (BadOverlap(rl_src, rl_dest)) {
@@ -2053,7 +2281,7 @@
       if (rl_src2.is_const) {
         isConstSuccess = GenLongLongImm(rl_dest, rl_src1, rl_src2, opcode);
       } else {
-        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
+        GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
         isConstSuccess = true;
       }
       break;
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 1bda738..bd2e0f3 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1098,11 +1098,6 @@
 }
 
 bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
-  if (cu_->target64) {
-    // TODO: Implement ArrayCOpy intrinsic for x86_64
-    return false;
-  }
-
   RegLocation rl_src = info->args[0];
   RegLocation rl_srcPos = info->args[1];
   RegLocation rl_dst = info->args[2];
@@ -1115,31 +1110,32 @@
     return false;
   }
   ClobberCallerSave();
-  LockCallTemps();  // Using fixed registers
-  LoadValueDirectFixed(rl_src , rs_rAX);
-  LoadValueDirectFixed(rl_dst , rs_rCX);
-  LIR* src_dst_same  = OpCmpBranch(kCondEq, rs_rAX , rs_rCX, nullptr);
-  LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX , 0, nullptr);
-  LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX , 0, nullptr);
-  LoadValueDirectFixed(rl_length , rs_rDX);
-  LIR* len_negative  = OpCmpImmBranch(kCondLt, rs_rDX , 0, nullptr);
-  LIR* len_too_big  = OpCmpImmBranch(kCondGt, rs_rDX , 128, nullptr);
-  LoadValueDirectFixed(rl_src , rs_rAX);
-  LoadWordDisp(rs_rAX , mirror::Array::LengthOffset().Int32Value(), rs_rAX);
+  LockCallTemps();  // Using fixed registers.
+  RegStorage tmp_reg = cu_->target64 ? rs_r11 : rs_rBX;
+  LoadValueDirectFixed(rl_src, rs_rAX);
+  LoadValueDirectFixed(rl_dst, rs_rCX);
+  LIR* src_dst_same  = OpCmpBranch(kCondEq, rs_rAX, rs_rCX, nullptr);
+  LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX, 0, nullptr);
+  LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
+  LoadValueDirectFixed(rl_length, rs_rDX);
+  // If the length of the copy is > 128 characters (256 bytes) or negative, take the slow path.
+  LIR* len_too_big  = OpCmpImmBranch(kCondHi, rs_rDX, 128, nullptr);
+  LoadValueDirectFixed(rl_src, rs_rAX);
+  LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
   LIR* src_bad_len  = nullptr;
   LIR* srcPos_negative  = nullptr;
   if (!rl_srcPos.is_const) {
-    LoadValueDirectFixed(rl_srcPos , rs_rBX);
-    srcPos_negative  = OpCmpImmBranch(kCondLt, rs_rBX , 0, nullptr);
-    OpRegReg(kOpAdd, rs_rBX, rs_rDX);
-    src_bad_len  = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr);
+    LoadValueDirectFixed(rl_srcPos, tmp_reg);
+    srcPos_negative  = OpCmpImmBranch(kCondLt, tmp_reg, 0, nullptr);
+    OpRegReg(kOpAdd, tmp_reg, rs_rDX);
+    src_bad_len  = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
   } else {
-    int pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg);
+    int32_t pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg);
     if (pos_val == 0) {
-      src_bad_len  = OpCmpBranch(kCondLt, rs_rAX , rs_rDX, nullptr);
+      src_bad_len  = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
     } else {
-      OpRegRegImm(kOpAdd, rs_rBX,  rs_rDX, pos_val);
-      src_bad_len  = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr);
+      OpRegRegImm(kOpAdd, tmp_reg,  rs_rDX, pos_val);
+      src_bad_len  = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
     }
   }
   LIR* dstPos_negative = nullptr;
@@ -1147,49 +1143,49 @@
   LoadValueDirectFixed(rl_dst, rs_rAX);
   LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
   if (!rl_dstPos.is_const) {
-    LoadValueDirectFixed(rl_dstPos , rs_rBX);
-    dstPos_negative = OpCmpImmBranch(kCondLt, rs_rBX , 0, nullptr);
-    OpRegRegReg(kOpAdd, rs_rBX, rs_rBX, rs_rDX);
-    dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr);
+    LoadValueDirectFixed(rl_dstPos, tmp_reg);
+    dstPos_negative = OpCmpImmBranch(kCondLt, tmp_reg, 0, nullptr);
+    OpRegRegReg(kOpAdd, tmp_reg, tmp_reg, rs_rDX);
+    dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
   } else {
-    int pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg);
+    int32_t pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg);
     if (pos_val == 0) {
-      dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rDX, nullptr);
+      dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
     } else {
-      OpRegRegImm(kOpAdd, rs_rBX,  rs_rDX, pos_val);
-      dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr);
+      OpRegRegImm(kOpAdd, tmp_reg,  rs_rDX, pos_val);
+      dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
     }
   }
-  // everything is checked now
-  LoadValueDirectFixed(rl_src , rs_rAX);
-  LoadValueDirectFixed(rl_dst , rs_rBX);
-  LoadValueDirectFixed(rl_srcPos , rs_rCX);
+  // Everything is checked now.
+  LoadValueDirectFixed(rl_src, rs_rAX);
+  LoadValueDirectFixed(rl_dst, tmp_reg);
+  LoadValueDirectFixed(rl_srcPos, rs_rCX);
   NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(),
-       rs_rCX.GetReg() , 1, mirror::Array::DataOffset(2).Int32Value());
-  // RAX now holds the address of the first src element to be copied
+       rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
+  // RAX now holds the address of the first src element to be copied.
 
-  LoadValueDirectFixed(rl_dstPos , rs_rCX);
-  NewLIR5(kX86Lea32RA, rs_rBX.GetReg(), rs_rBX.GetReg(),
-       rs_rCX.GetReg() , 1, mirror::Array::DataOffset(2).Int32Value() );
-  // RBX now holds the address of the first dst element to be copied
+  LoadValueDirectFixed(rl_dstPos, rs_rCX);
+  NewLIR5(kX86Lea32RA, tmp_reg.GetReg(), tmp_reg.GetReg(),
+       rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
+  // The temp register now holds the address of the first dst element to be copied.
 
-  // check if the number of elements to be copied is odd or even. If odd
+  // Check if the number of elements to be copied is odd or even. If odd
   // then copy the first element (so that the remaining number of elements
   // is even).
-  LoadValueDirectFixed(rl_length , rs_rCX);
+  LoadValueDirectFixed(rl_length, rs_rCX);
   OpRegImm(kOpAnd, rs_rCX, 1);
   LIR* jmp_to_begin_loop  = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
   OpRegImm(kOpSub, rs_rDX, 1);
   LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
-  StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
+  StoreBaseIndexedDisp(tmp_reg, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
 
-  // since the remaining number of elements is even, we will copy by
+  // Since the remaining number of elements is even, we will copy by
   // two elements at a time.
-  LIR *beginLoop = NewLIR0(kPseudoTargetLabel);
-  LIR* jmp_to_ret  = OpCmpImmBranch(kCondEq, rs_rDX , 0, nullptr);
+  LIR* beginLoop = NewLIR0(kPseudoTargetLabel);
+  LIR* jmp_to_ret  = OpCmpImmBranch(kCondEq, rs_rDX, 0, nullptr);
   OpRegImm(kOpSub, rs_rDX, 2);
   LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle);
-  StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSingle);
+  StoreBaseIndexedDisp(tmp_reg, rs_rDX, 1, 0, rs_rCX, kSingle);
   OpUnconditionalBranch(beginLoop);
   LIR *check_failed = NewLIR0(kPseudoTargetLabel);
   LIR* launchpad_branch  = OpUnconditionalBranch(nullptr);
@@ -1197,7 +1193,6 @@
   jmp_to_ret->target = return_point;
   jmp_to_begin_loop->target = beginLoop;
   src_dst_same->target = check_failed;
-  len_negative->target = check_failed;
   len_too_big->target = check_failed;
   src_null_branch->target = check_failed;
   if (srcPos_negative != nullptr)
@@ -2868,4 +2863,24 @@
   return true;
 }
 
+bool X86Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
+  RegLocation rl_dest = InlineTarget(info);
+
+  // Early exit if the result is unused.
+  if (rl_dest.orig_sreg < 0) {
+    return true;
+  }
+
+  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
+
+  if (cu_->target64) {
+    OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<8>());
+  } else {
+    OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<4>());
+  }
+
+  StoreValue(rl_dest, rl_result);
+  return true;
+}
+
 }  // namespace art
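
For reference, the copy strategy GenInlinedArrayCopyCharArray emits above can be modeled on the host as follows. This is an illustrative sketch (CopyCharsLikeIntrinsic is a made-up name), not the generated LIR: peel one element when the count is odd, then move two 16-bit chars per iteration with single 32-bit loads and stores, walking the index down to zero exactly as the emitted loop does.

#include <cstdint>
#include <cstring>

void CopyCharsLikeIntrinsic(const uint16_t* src, uint16_t* dst, int len) {
  if (len & 1) {        // Odd count: copy the last element by itself.
    --len;
    dst[len] = src[len];
  }
  while (len != 0) {    // Even count remains: copy 2 chars at a time.
    len -= 2;
    uint32_t two;
    std::memcpy(&two, src + len, sizeof(two));  // one 32-bit load (kSingle)
    std::memcpy(dst + len, &two, sizeof(two));  // one 32-bit store
  }
}
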
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index a77d79e..a48613f 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -565,6 +565,7 @@
     bool is_fp = r_dest.IsFloat();
     // TODO: clean this up once we fully recognize 64-bit storage containers.
     if (is_fp) {
+      DCHECK(r_dest.IsDouble());
       if (value == 0) {
         return NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
       } else if (base_of_code_ != nullptr) {
@@ -594,16 +595,23 @@
         Clobber(rl_method.reg);
         store_method_addr_used_ = true;
       } else {
-        if (val_lo == 0) {
-          res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
+        if (r_dest.IsPair()) {
+          if (val_lo == 0) {
+            res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
+          } else {
+            res = LoadConstantNoClobber(RegStorage::FloatSolo32(low_reg_val), val_lo);
+          }
+          if (val_hi != 0) {
+            RegStorage r_dest_hi = AllocTempDouble();
+            LoadConstantNoClobber(r_dest_hi, val_hi);
+            NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetReg());
+            FreeTemp(r_dest_hi);
+          }
         } else {
-          res = LoadConstantNoClobber(RegStorage::FloatSolo32(low_reg_val), val_lo);
-        }
-        if (val_hi != 0) {
-          RegStorage r_dest_hi = AllocTempDouble();
-          LoadConstantNoClobber(r_dest_hi, val_hi);
-          NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetReg());
-          FreeTemp(r_dest_hi);
+          RegStorage r_temp = AllocTypedTempWide(false, kCoreReg);
+          res = LoadConstantWide(r_temp, value);
+          OpRegCopyWide(r_dest, r_temp);
+          FreeTemp(r_temp);
         }
       }
     } else {
@@ -1008,8 +1016,8 @@
 }
 
 void X86Mir2Lir::AnalyzeDoubleUse(RegLocation use) {
-  // If this is a double literal, we will want it in the literal pool.
-  if (use.is_const) {
+  // If this is a double literal, we will want it in the literal pool on 32-bit platforms.
+  if (use.is_const && !cu_->target64) {
     store_method_addr_ = true;
   }
 }
@@ -1043,12 +1051,18 @@
 }
 
 void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir) {
+  // For now this is only relevant for x86-32.
+  if (cu_->target64) {
+    return;
+  }
+
   uint32_t index = mir->dalvikInsn.vB;
   if (!(mir->optimization_flags & MIR_INLINED)) {
     DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
+    DexFileMethodInliner* method_inliner =
+      cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
     InlineMethod method;
-    if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
-        ->IsIntrinsic(index, &method)) {
+    if (method_inliner->IsIntrinsic(index, &method)) {
       switch (method.opcode) {
         case kIntrinsicAbsDouble:
         case kIntrinsicMinMaxDouble:
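
The r_dest.IsPair() branch added to the 64-bit FP constant load earlier in this file assembles a double's bit pattern from two 32-bit halves via punpckldq. A host-intrinsics illustration of that packing (SSE2; PackDoubleBits is a hypothetical name, not an ART function):

#include <emmintrin.h>  // SSE2 intrinsics
#include <cstdint>

__m128d PackDoubleBits(int32_t val_lo, int32_t val_hi) {
  __m128i lo = _mm_cvtsi32_si128(val_lo);      // movd: low half into an xmm
  __m128i hi = _mm_cvtsi32_si128(val_hi);      // movd: high half into an xmm
  __m128i bits = _mm_unpacklo_epi32(lo, hi);   // punpckldq: hi:lo in 64 bits
  return _mm_castsi128_pd(bits);               // reinterpret as double bits
}
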
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index addd628..706933a 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -192,6 +192,18 @@
     return (reg & (kFloatingPoint | k64BitMask)) == kFloatingPoint;
   }
 
+  static constexpr bool Is32Bit(uint16_t reg) {
+    return ((reg & kShapeMask) == k32BitSolo);
+  }
+
+  static constexpr bool Is64Bit(uint16_t reg) {
+    return ((reg & k64BitMask) == k64Bits);
+  }
+
+  static constexpr bool Is64BitSolo(uint16_t reg) {
+    return ((reg & kShapeMask) == k64BitSolo);
+  }
+
   // Used to retrieve either the low register of a pair, or the only register.
   int GetReg() const {
     DCHECK(!IsPair()) << "reg_ = 0x" << std::hex << reg_;
@@ -265,11 +277,11 @@
   }
 
   static constexpr bool SameRegType(RegStorage reg1, RegStorage reg2) {
-    return (reg1.IsDouble() == reg2.IsDouble()) && (reg1.IsSingle() == reg2.IsSingle());
+    return ((reg1.reg_ & kShapeTypeMask) == (reg2.reg_ & kShapeTypeMask));
   }
 
   static constexpr bool SameRegType(int reg1, int reg2) {
-    return (IsDouble(reg1) == IsDouble(reg2)) && (IsSingle(reg1) == IsSingle(reg2));
+    return ((reg1 & kShapeTypeMask) == (reg2 & kShapeTypeMask));
   }
 
   // Create a 32-bit solo.
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 01c8f80..1b3f2a1 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -216,7 +216,7 @@
     verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
     bool is_range = (inst->Opcode() ==  Instruction::INVOKE_VIRTUAL_RANGE) ||
         (inst->Opcode() ==  Instruction::INVOKE_INTERFACE_RANGE);
-    const verifier::RegType&
+    verifier::RegType&
         reg_type(line->GetRegisterType(is_range ? inst->VRegC_3rc() : inst->VRegC_35c()));
 
     if (!reg_type.HasClass()) {
@@ -284,18 +284,18 @@
       const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
       bool is_safe_cast = false;
       if (code == Instruction::CHECK_CAST) {
-        const verifier::RegType& reg_type(line->GetRegisterType(inst->VRegA_21c()));
-        const verifier::RegType& cast_type =
+        verifier::RegType& reg_type(line->GetRegisterType(inst->VRegA_21c()));
+        verifier::RegType& cast_type =
             method_verifier->ResolveCheckedClass(inst->VRegB_21c());
         is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type);
       } else {
-        const verifier::RegType& array_type(line->GetRegisterType(inst->VRegB_23x()));
+        verifier::RegType& array_type(line->GetRegisterType(inst->VRegB_23x()));
         // We only know its safe to assign to an array if the array type is precise. For example,
         // an Object[] can have any type of object stored in it, but it may also be assigned a
         // String[] in which case the stores need to be of Strings.
         if (array_type.IsPreciseReference()) {
-          const verifier::RegType& value_type(line->GetRegisterType(inst->VRegA_23x()));
-          const verifier::RegType& component_type = method_verifier->GetRegTypeCache()
+          verifier::RegType& value_type(line->GetRegisterType(inst->VRegA_23x()));
+          verifier::RegType& component_type = method_verifier->GetRegTypeCache()
               ->GetComponentType(array_type, method_verifier->GetClassLoader());
           is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type);
         }
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index f85bc65..ed126ad 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -933,13 +933,13 @@
   }
   *out_is_finalizable = resolved_class->IsFinalizable();
   const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot();
+  const bool support_boot_image_fixup = GetSupportBootImageFixup();
   if (compiling_boot) {
     // boot -> boot class pointers.
     // True if the class is in the image at boot compiling time.
     const bool is_image_class = IsImage() && IsImageClass(
         dex_file.StringDataByIdx(dex_file.GetTypeId(type_idx).descriptor_idx_));
     // True if pc relative load works.
-    const bool support_boot_image_fixup = GetSupportBootImageFixup();
     if (is_image_class && support_boot_image_fixup) {
       *is_type_initialized = resolved_class->IsInitialized();
       *use_direct_type_ptr = false;
@@ -952,7 +952,7 @@
     // True if the class is in the image at app compiling time.
     const bool class_in_image =
         Runtime::Current()->GetHeap()->FindSpaceFromObject(resolved_class, false)->IsImageSpace();
-    if (class_in_image) {
+    if (class_in_image && support_boot_image_fixup) {
       // boot -> app class pointers.
       *is_type_initialized = resolved_class->IsInitialized();
       // TODO This is somewhat hacky. We should refactor all of this invoke codepath.
diff --git a/compiler/elf_patcher.cc b/compiler/elf_patcher.cc
index 6112fbb..137110f 100644
--- a/compiler/elf_patcher.cc
+++ b/compiler/elf_patcher.cc
@@ -120,6 +120,7 @@
 
 uint32_t* ElfPatcher::GetPatchLocation(uintptr_t patch_ptr) {
   CHECK_GE(patch_ptr, reinterpret_cast<uintptr_t>(oat_file_->Begin()));
+  CHECK_LE(patch_ptr, reinterpret_cast<uintptr_t>(oat_file_->End()));
   uintptr_t off = patch_ptr - reinterpret_cast<uintptr_t>(oat_file_->Begin());
   uintptr_t ret = reinterpret_cast<uintptr_t>(oat_header_) + off;
 
@@ -144,20 +145,20 @@
           cpatch->GetTargetDexFile()->GetMethodId(cpatch->GetTargetMethodIdx());
       uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF;
       uint32_t actual = *patch_location;
-      CHECK(actual == expected || actual == value) << std::hex
-          << "actual=" << actual
-          << "expected=" << expected
-          << "value=" << value;
+      CHECK(actual == expected || actual == value) << "Patching call failed: " << std::hex
+          << " actual=" << actual
+          << " expected=" << expected
+          << " value=" << value;
     }
     if (patch->IsType()) {
       const CompilerDriver::TypePatchInformation* tpatch = patch->AsType();
       const DexFile::TypeId& id = tpatch->GetDexFile().GetTypeId(tpatch->GetTargetTypeIdx());
       uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF;
       uint32_t actual = *patch_location;
-      CHECK(actual == expected || actual == value) << std::hex
-          << "actual=" << actual
-          << "expected=" << expected
-          << "value=" << value;
+      CHECK(actual == expected || actual == value) << "Patching type failed: " << std::hex
+          << " actual=" << actual
+          << " expected=" << expected
+          << " value=" << value;
     }
   }
   *patch_location = value;
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 3005e56..6b23345 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -141,6 +141,8 @@
   std::string image("-Ximage:");
   image.append(image_location.GetFilename());
   options.push_back(std::make_pair(image.c_str(), reinterpret_cast<void*>(NULL)));
+  // By default, the compiler this creates will not include patch information.
+  options.push_back(std::make_pair("-Xnorelocate", nullptr));
 
   if (!Runtime::Create(options, false)) {
     LOG(FATAL) << "Failed to create runtime";
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index f4bcb1d..7bfbb6f 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -24,6 +24,7 @@
 #include "base/mutex.h"
 #include "mem_map.h"
 #include "utils.h"
+#include "utils/debug_stack.h"
 
 namespace art {
 
@@ -34,6 +35,9 @@
 class ScopedArenaAllocator;
 class MemStats;
 
+template <typename T>
+class ArenaAllocatorAdapter;
+
 static constexpr bool kArenaAllocatorCountAllocations = false;
 
 // Type of allocation for memory tuning.
@@ -147,11 +151,14 @@
   DISALLOW_COPY_AND_ASSIGN(ArenaPool);
 };
 
-class ArenaAllocator : private ArenaAllocatorStats {
+class ArenaAllocator : private DebugStackRefCounter, private ArenaAllocatorStats {
  public:
   explicit ArenaAllocator(ArenaPool* pool);
   ~ArenaAllocator();
 
+  // Get adapter for use in STL containers. See arena_containers.h.
+  ArenaAllocatorAdapter<void> Adapter(ArenaAllocKind kind = kArenaAllocSTL);
+
   // Returns zeroed memory.
   void* Alloc(size_t bytes, ArenaAllocKind kind) ALWAYS_INLINE {
     if (UNLIKELY(running_on_valgrind_)) {
@@ -190,6 +197,9 @@
   Arena* arena_head_;
   bool running_on_valgrind_;
 
+  template <typename U>
+  friend class ArenaAllocatorAdapter;
+
   DISALLOW_COPY_AND_ASSIGN(ArenaAllocator);
 };  // ArenaAllocator
 
diff --git a/compiler/utils/arena_containers.h b/compiler/utils/arena_containers.h
new file mode 100644
index 0000000..c48b0c8
--- /dev/null
+++ b/compiler/utils/arena_containers.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARENA_CONTAINERS_H_
+#define ART_COMPILER_UTILS_ARENA_CONTAINERS_H_
+
+#include <deque>
+#include <queue>
+#include <set>
+#include <vector>
+
+#include "utils/arena_allocator.h"
+#include "safe_map.h"
+
+namespace art {
+
+// Adapter for use of ArenaAllocator in STL containers.
+// Use ArenaAllocator::Adapter() to create an adapter to pass to container constructors.
+// For example,
+//   struct Foo {
+//     explicit Foo(ArenaAllocator* allocator)
+//         : foo_vector(allocator->Adapter(kArenaAllocMisc)),
+//           foo_map(std::less<int>(), allocator->Adapter()) {
+//     }
+//     ArenaVector<int> foo_vector;
+//     ArenaSafeMap<int, int> foo_map;
+//   };
+template <typename T>
+class ArenaAllocatorAdapter;
+
+template <typename T>
+using ArenaDeque = std::deque<T, ArenaAllocatorAdapter<T>>;
+
+template <typename T>
+using ArenaQueue = std::queue<T, ArenaDeque<T>>;
+
+template <typename T>
+using ArenaVector = std::vector<T, ArenaAllocatorAdapter<T>>;
+
+template <typename T, typename Comparator = std::less<T>>
+using ArenaSet = std::set<T, Comparator, ArenaAllocatorAdapter<T>>;
+
+template <typename K, typename V, typename Comparator = std::less<K>>
+using ArenaSafeMap =
+    SafeMap<K, V, Comparator, ArenaAllocatorAdapter<std::pair<const K, V>>>;
+
+// Implementation details below.
+
+template <bool kCount>
+class ArenaAllocatorAdapterKindImpl;
+
+template <>
+class ArenaAllocatorAdapterKindImpl<false> {
+ public:
+  // Not tracking allocations; ignore the supplied kind and arbitrarily provide kArenaAllocSTL.
+  explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) { }
+  ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl& other) = default;
+  ArenaAllocKind Kind() { return kArenaAllocSTL; }
+};
+
+template <bool kCount>
+class ArenaAllocatorAdapterKindImpl {
+ public:
+  explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) : kind_(kind) { }
+  ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl& other) = default;
+  ArenaAllocKind Kind() { return kind_; }
+
+ private:
+  ArenaAllocKind kind_;
+};
+
+typedef ArenaAllocatorAdapterKindImpl<kArenaAllocatorCountAllocations> ArenaAllocatorAdapterKind;
+
+template <>
+class ArenaAllocatorAdapter<void>
+    : private DebugStackReference, private ArenaAllocatorAdapterKind {
+ public:
+  typedef void value_type;
+  typedef void* pointer;
+  typedef const void* const_pointer;
+
+  template <typename U>
+  struct rebind {
+    typedef ArenaAllocatorAdapter<U> other;
+  };
+
+  explicit ArenaAllocatorAdapter(ArenaAllocator* arena_allocator,
+                                 ArenaAllocKind kind = kArenaAllocSTL)
+      : DebugStackReference(arena_allocator),
+        ArenaAllocatorAdapterKind(kind),
+        arena_allocator_(arena_allocator) {
+  }
+  template <typename U>
+  ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other)
+      : DebugStackReference(other),
+        ArenaAllocatorAdapterKind(other),
+        arena_allocator_(other.arena_allocator_) {
+  }
+  ArenaAllocatorAdapter(const ArenaAllocatorAdapter& other) = default;
+  ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter& other) = default;
+  ~ArenaAllocatorAdapter() = default;
+
+ private:
+  ArenaAllocator* arena_allocator_;
+
+  template <typename U>
+  friend class ArenaAllocatorAdapter;
+};
+
+template <typename T>
+class ArenaAllocatorAdapter : private DebugStackReference, private ArenaAllocatorAdapterKind {
+ public:
+  typedef T value_type;
+  typedef T* pointer;
+  typedef T& reference;
+  typedef const T* const_pointer;
+  typedef const T& const_reference;
+  typedef size_t size_type;
+  typedef ptrdiff_t difference_type;
+
+  template <typename U>
+  struct rebind {
+    typedef ArenaAllocatorAdapter<U> other;
+  };
+
+  explicit ArenaAllocatorAdapter(ArenaAllocator* arena_allocator, ArenaAllocKind kind)
+      : DebugStackReference(arena_allocator),
+        ArenaAllocatorAdapterKind(kind),
+        arena_allocator_(arena_allocator) {
+  }
+  template <typename U>
+  ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other)
+      : DebugStackReference(other),
+        ArenaAllocatorAdapterKind(other),
+        arena_allocator_(other.arena_allocator_) {
+  }
+  ArenaAllocatorAdapter(const ArenaAllocatorAdapter& other) = default;
+  ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter& other) = default;
+  ~ArenaAllocatorAdapter() = default;
+
+  size_type max_size() const {
+    return static_cast<size_type>(-1) / sizeof(T);
+  }
+
+  pointer address(reference x) const { return &x; }
+  const_pointer address(const_reference x) const { return &x; }
+
+  pointer allocate(size_type n, ArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+    DCHECK_LE(n, max_size());
+    return reinterpret_cast<T*>(arena_allocator_->Alloc(n * sizeof(T),
+                                                        ArenaAllocatorAdapterKind::Kind()));
+  }
+  void deallocate(pointer p, size_type n) {
+  }
+
+  void construct(pointer p, const_reference val) {
+    new (static_cast<void*>(p)) value_type(val);
+  }
+  void destroy(pointer p) {
+    p->~value_type();
+  }
+
+ private:
+  ArenaAllocator* arena_allocator_;
+
+  template <typename U>
+  friend class ArenaAllocatorAdapter;
+
+  template <typename U>
+  friend bool operator==(const ArenaAllocatorAdapter<U>& lhs,
+                         const ArenaAllocatorAdapter<U>& rhs);
+};
+
+template <typename T>
+inline bool operator==(const ArenaAllocatorAdapter<T>& lhs,
+                       const ArenaAllocatorAdapter<T>& rhs) {
+  return lhs.arena_allocator_ == rhs.arena_allocator_;
+}
+
+template <typename T>
+inline bool operator!=(const ArenaAllocatorAdapter<T>& lhs,
+                       const ArenaAllocatorAdapter<T>& rhs) {
+  return !(lhs == rhs);
+}
+
+inline ArenaAllocatorAdapter<void> ArenaAllocator::Adapter(ArenaAllocKind kind) {
+  return ArenaAllocatorAdapter<void>(this, kind);
+}
+
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_ARENA_CONTAINERS_H_
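
A hypothetical caller of the new adapter, to make the allocation flow concrete (BuildWorklist and the literal values are illustrative only, not ART code):

#include "utils/arena_containers.h"

namespace art {

void BuildWorklist(ArenaAllocator* allocator) {
  // All container memory comes from the arena, so deallocate() is a no-op.
  ArenaVector<int> worklist(allocator->Adapter(kArenaAllocMisc));
  worklist.push_back(1);   // allocates through ArenaAllocator::Alloc()
  worklist.push_back(2);   // tagged kArenaAllocMisc when counting is enabled
  ArenaSafeMap<int, int> seen(std::less<int>(), allocator->Adapter());
  seen.Put(1, 2);          // map nodes default to the kArenaAllocSTL kind
}

}  // namespace art
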
diff --git a/compiler/utils/scoped_arena_allocator.h b/compiler/utils/scoped_arena_allocator.h
index 9f33f2d..62ea330 100644
--- a/compiler/utils/scoped_arena_allocator.h
+++ b/compiler/utils/scoped_arena_allocator.h
@@ -120,8 +120,8 @@
     return arena_stack_->Alloc(bytes, kind);
   }
 
-  // ScopedArenaAllocatorAdapter is incomplete here, we need to define this later.
-  ScopedArenaAllocatorAdapter<void> Adapter();
+  // Get adapter for use in STL containers. See scoped_arena_containers.h.
+  ScopedArenaAllocatorAdapter<void> Adapter(ArenaAllocKind kind = kArenaAllocSTL);
 
   // Allow a delete-expression to destroy but not deallocate allocators created by Create().
   static void operator delete(void* ptr) { UNUSED(ptr); }
@@ -138,125 +138,6 @@
   DISALLOW_COPY_AND_ASSIGN(ScopedArenaAllocator);
 };
 
-template <>
-class ScopedArenaAllocatorAdapter<void>
-    : private DebugStackReference, private DebugStackIndirectTopRef {
- public:
-  typedef void value_type;
-  typedef void* pointer;
-  typedef const void* const_pointer;
-
-  template <typename U>
-  struct rebind {
-    typedef ScopedArenaAllocatorAdapter<U> other;
-  };
-
-  explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator)
-      : DebugStackReference(arena_allocator),
-        DebugStackIndirectTopRef(arena_allocator),
-        arena_stack_(arena_allocator->arena_stack_) {
-  }
-  template <typename U>
-  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other)
-      : DebugStackReference(other),
-        DebugStackIndirectTopRef(other),
-        arena_stack_(other.arena_stack_) {
-  }
-  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
-  ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
-  ~ScopedArenaAllocatorAdapter() = default;
-
- private:
-  ArenaStack* arena_stack_;
-
-  template <typename U>
-  friend class ScopedArenaAllocatorAdapter;
-};
-
-// Adapter for use of ScopedArenaAllocator in STL containers.
-template <typename T>
-class ScopedArenaAllocatorAdapter : private DebugStackReference, private DebugStackIndirectTopRef {
- public:
-  typedef T value_type;
-  typedef T* pointer;
-  typedef T& reference;
-  typedef const T* const_pointer;
-  typedef const T& const_reference;
-  typedef size_t size_type;
-  typedef ptrdiff_t difference_type;
-
-  template <typename U>
-  struct rebind {
-    typedef ScopedArenaAllocatorAdapter<U> other;
-  };
-
-  explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator)
-      : DebugStackReference(arena_allocator),
-        DebugStackIndirectTopRef(arena_allocator),
-        arena_stack_(arena_allocator->arena_stack_) {
-  }
-  template <typename U>
-  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other)
-      : DebugStackReference(other),
-        DebugStackIndirectTopRef(other),
-        arena_stack_(other.arena_stack_) {
-  }
-  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
-  ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
-  ~ScopedArenaAllocatorAdapter() = default;
-
-  size_type max_size() const {
-    return static_cast<size_type>(-1) / sizeof(T);
-  }
-
-  pointer address(reference x) const { return &x; }
-  const_pointer address(const_reference x) const { return &x; }
-
-  pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
-    DCHECK_LE(n, max_size());
-    DebugStackIndirectTopRef::CheckTop();
-    return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T), kArenaAllocSTL));
-  }
-  void deallocate(pointer p, size_type n) {
-    DebugStackIndirectTopRef::CheckTop();
-  }
-
-  void construct(pointer p, const_reference val) {
-    // Don't CheckTop(), allow reusing existing capacity of a vector/deque below the top.
-    new (static_cast<void*>(p)) value_type(val);
-  }
-  void destroy(pointer p) {
-    // Don't CheckTop(), allow reusing existing capacity of a vector/deque below the top.
-    p->~value_type();
-  }
-
- private:
-  ArenaStack* arena_stack_;
-
-  template <typename U>
-  friend class ScopedArenaAllocatorAdapter;
-
-  template <typename U>
-  friend bool operator==(const ScopedArenaAllocatorAdapter<U>& lhs,
-                         const ScopedArenaAllocatorAdapter<U>& rhs);
-};
-
-template <typename T>
-inline bool operator==(const ScopedArenaAllocatorAdapter<T>& lhs,
-                       const ScopedArenaAllocatorAdapter<T>& rhs) {
-  return lhs.arena_stack_ == rhs.arena_stack_;
-}
-
-template <typename T>
-inline bool operator!=(const ScopedArenaAllocatorAdapter<T>& lhs,
-                       const ScopedArenaAllocatorAdapter<T>& rhs) {
-  return !(lhs == rhs);
-}
-
-inline ScopedArenaAllocatorAdapter<void> ScopedArenaAllocator::Adapter() {
-  return ScopedArenaAllocatorAdapter<void>(this);
-}
-
 }  // namespace art
 
 #endif  // ART_COMPILER_UTILS_SCOPED_ARENA_ALLOCATOR_H_
diff --git a/compiler/utils/scoped_arena_containers.h b/compiler/utils/scoped_arena_containers.h
index 6728565..0de7403 100644
--- a/compiler/utils/scoped_arena_containers.h
+++ b/compiler/utils/scoped_arena_containers.h
@@ -22,11 +22,23 @@
 #include <set>
 #include <vector>
 
+#include "utils/arena_containers.h"  // For ArenaAllocatorAdapterKind.
 #include "utils/scoped_arena_allocator.h"
 #include "safe_map.h"
 
 namespace art {
 
+// Adapter for use of ScopedArenaAllocator in STL containers.
+// Use ScopedArenaAllocator::Adapter() to create an adapter to pass to container constructors.
+// For example,
+//   void foo(ScopedArenaAllocator* allocator) {
+//     ScopedArenaVector<int> foo_vector(allocator->Adapter(kArenaAllocMisc));
+//     ScopedArenaSafeMap<int, int> foo_map(std::less<int>(), allocator->Adapter());
+//     // Use foo_vector and foo_map...
+//   }
+template <typename T>
+class ScopedArenaAllocatorAdapter;
+
 template <typename T>
 using ScopedArenaDeque = std::deque<T, ScopedArenaAllocatorAdapter<T>>;
 
@@ -43,6 +55,136 @@
 using ScopedArenaSafeMap =
     SafeMap<K, V, Comparator, ScopedArenaAllocatorAdapter<std::pair<const K, V>>>;
 
+// Implementation details below.
+
+template <>
+class ScopedArenaAllocatorAdapter<void>
+    : private DebugStackReference, private DebugStackIndirectTopRef,
+      private ArenaAllocatorAdapterKind {
+ public:
+  typedef void value_type;
+  typedef void* pointer;
+  typedef const void* const_pointer;
+
+  template <typename U>
+  struct rebind {
+    typedef ScopedArenaAllocatorAdapter<U> other;
+  };
+
+  explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator,
+                                       ArenaAllocKind kind = kArenaAllocSTL)
+      : DebugStackReference(arena_allocator),
+        DebugStackIndirectTopRef(arena_allocator),
+        ArenaAllocatorAdapterKind(kind),
+        arena_stack_(arena_allocator->arena_stack_) {
+  }
+  template <typename U>
+  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other)
+      : DebugStackReference(other),
+        DebugStackIndirectTopRef(other),
+        ArenaAllocatorAdapterKind(other),
+        arena_stack_(other.arena_stack_) {
+  }
+  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
+  ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
+  ~ScopedArenaAllocatorAdapter() = default;
+
+ private:
+  ArenaStack* arena_stack_;
+
+  template <typename U>
+  friend class ScopedArenaAllocatorAdapter;
+};
+
+template <typename T>
+class ScopedArenaAllocatorAdapter
+    : private DebugStackReference, private DebugStackIndirectTopRef,
+      private ArenaAllocatorAdapterKind {
+ public:
+  typedef T value_type;
+  typedef T* pointer;
+  typedef T& reference;
+  typedef const T* const_pointer;
+  typedef const T& const_reference;
+  typedef size_t size_type;
+  typedef ptrdiff_t difference_type;
+
+  template <typename U>
+  struct rebind {
+    typedef ScopedArenaAllocatorAdapter<U> other;
+  };
+
+  explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator,
+                                       ArenaAllocKind kind = kArenaAllocSTL)
+      : DebugStackReference(arena_allocator),
+        DebugStackIndirectTopRef(arena_allocator),
+        ArenaAllocatorAdapterKind(kind),
+        arena_stack_(arena_allocator->arena_stack_) {
+  }
+  template <typename U>
+  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other)
+      : DebugStackReference(other),
+        DebugStackIndirectTopRef(other),
+        ArenaAllocatorAdapterKind(other),
+        arena_stack_(other.arena_stack_) {
+  }
+  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
+  ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
+  ~ScopedArenaAllocatorAdapter() = default;
+
+  size_type max_size() const {
+    return static_cast<size_type>(-1) / sizeof(T);
+  }
+
+  pointer address(reference x) const { return &x; }
+  const_pointer address(const_reference x) const { return &x; }
+
+  pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+    DCHECK_LE(n, max_size());
+    DebugStackIndirectTopRef::CheckTop();
+    return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T),
+                                                    ArenaAllocatorAdapterKind::Kind()));
+  }
+  void deallocate(pointer p, size_type n) {
+    DebugStackIndirectTopRef::CheckTop();
+  }
+
+  void construct(pointer p, const_reference val) {
+    // Don't CheckTop(), allow reusing existing capacity of a vector/deque below the top.
+    new (static_cast<void*>(p)) value_type(val);
+  }
+  void destroy(pointer p) {
+    // Don't CheckTop(), allow reusing existing capacity of a vector/deque below the top.
+    p->~value_type();
+  }
+
+ private:
+  ArenaStack* arena_stack_;
+
+  template <typename U>
+  friend class ScopedArenaAllocatorAdapter;
+
+  template <typename U>
+  friend bool operator==(const ScopedArenaAllocatorAdapter<U>& lhs,
+                         const ScopedArenaAllocatorAdapter<U>& rhs);
+};
+
+template <typename T>
+inline bool operator==(const ScopedArenaAllocatorAdapter<T>& lhs,
+                       const ScopedArenaAllocatorAdapter<T>& rhs) {
+  return lhs.arena_stack_ == rhs.arena_stack_;
+}
+
+template <typename T>
+inline bool operator!=(const ScopedArenaAllocatorAdapter<T>& lhs,
+                       const ScopedArenaAllocatorAdapter<T>& rhs) {
+  return !(lhs == rhs);
+}
+
+inline ScopedArenaAllocatorAdapter<void> ScopedArenaAllocator::Adapter(ArenaAllocKind kind) {
+  return ScopedArenaAllocatorAdapter<void>(this, kind);
+}
+
 }  // namespace art
 
 #endif  // ART_COMPILER_UTILS_SCOPED_ARENA_CONTAINERS_H_
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index a78d3f7..0437f30 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -430,8 +430,7 @@
       t2.NewTiming("Patching ELF");
       std::string error_msg;
       if (!PatchOatCode(driver.get(), oat_file, oat_location, &error_msg)) {
-        LOG(ERROR) << "Failed to fixup ELF file " << oat_file->GetPath();
-        LOG(ERROR) << "Error was: " << error_msg;
+        LOG(ERROR) << "Failed to fixup ELF file " << oat_file->GetPath() << ": " << error_msg;
         return nullptr;
       }
     }
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 101a55d..0ca8962 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -268,7 +268,7 @@
     target_specific = true;
     break;
   case 0x63:
-    if (rex == 0x48) {
+    if ((rex & REX_W) != 0) {
       opcode << "movsxd";
       has_modrm = true;
       load = true;
@@ -959,7 +959,7 @@
     byte_operand = true;
     break;
   case 0xB8: case 0xB9: case 0xBA: case 0xBB: case 0xBC: case 0xBD: case 0xBE: case 0xBF:
-    if (rex == 0x48) {
+    if ((rex & REX_W) != 0) {
       opcode << "movabsq";
       immediate_bytes = 8;
       reg_in_opcode = true;
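
Both disassembler fixes above replace an exact-byte comparison with a bit test because a REX prefix encodes 0100WRXB: REX.W is set for every byte in 0x48..0x4F, not just 0x48, so `rex == 0x48` missed forms that also set R, X, or B (e.g. 0x49, REX.WB, for movabsq into r8-r15). A minimal compile-time check, assuming REX_W is defined as 0x08 to match the disassembler's constant:

#include <cstdint>

constexpr uint8_t REX_W = 0x08;  // assumed to match disassembler_x86.cc

constexpr bool HasRexW(uint8_t rex) { return (rex & REX_W) != 0; }

static_assert(HasRexW(0x48) && HasRexW(0x49) && HasRexW(0x4F),
              "all REX.W variants must be recognized");
static_assert(!HasRexW(0x41), "REX.B alone does not set W");
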
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 12b7680..c1e3b25 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -615,6 +615,14 @@
     argv.push_back("--compiler-filter=verify-none");
   }
 
+  if (Runtime::Current()->MustRelocateIfPossible()) {
+    argv.push_back("--runtime-arg");
+    argv.push_back("-Xrelocate");
+  } else {
+    argv.push_back("--runtime-arg");
+    argv.push_back("-Xnorelocate");
+  }
+
   if (!kIsTargetBuild) {
     argv.push_back("--host");
   }
@@ -680,14 +688,6 @@
   return NULL;
 }
 
-static std::string GetMultiDexClassesDexName(size_t number, const char* dex_location) {
-  if (number == 0) {
-    return dex_location;
-  } else {
-    return StringPrintf("%s" kMultiDexSeparatorString "classes%zu.dex", dex_location, number + 1);
-  }
-}
-
 static bool LoadMultiDexFilesFromOatFile(const OatFile* oat_file, const char* dex_location,
                                          bool generated,
                                          std::vector<std::string>* error_msgs,
@@ -700,7 +700,7 @@
 
   bool success = true;
   for (size_t i = 0; success; ++i) {
-    std::string next_name_str = GetMultiDexClassesDexName(i, dex_location);
+    std::string next_name_str = DexFile::GetMultiDexClassesDexName(i, dex_location);
     const char* next_name = next_name_str.c_str();
 
     uint32_t dex_location_checksum;
@@ -994,11 +994,25 @@
   return oat_file.release();
 }
 
-bool ClassLinker::VerifyOatFileChecksums(const OatFile* oat_file,
-                                         const char* dex_location,
-                                         uint32_t dex_location_checksum,
-                                         const InstructionSet instruction_set,
-                                         std::string* error_msg) {
+bool ClassLinker::VerifyOatImageChecksum(const OatFile* oat_file,
+                                         const InstructionSet instruction_set) {
+  Runtime* runtime = Runtime::Current();
+  const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+  uint32_t image_oat_checksum = 0;
+  if (instruction_set == kRuntimeISA) {
+    const ImageHeader& image_header = image_space->GetImageHeader();
+    image_oat_checksum = image_header.GetOatChecksum();
+  } else {
+    std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
+        image_space->GetImageLocation().c_str(), instruction_set));
+    image_oat_checksum = image_header->GetOatChecksum();
+  }
+  return oat_file->GetOatHeader().GetImageFileLocationOatChecksum() == image_oat_checksum;
+}
+
+bool ClassLinker::VerifyOatChecksums(const OatFile* oat_file,
+                                     const InstructionSet instruction_set,
+                                     std::string* error_msg) {
   Runtime* runtime = Runtime::Current();
   const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
 
@@ -1021,9 +1035,28 @@
     image_patch_delta = image_header->GetPatchDelta();
   }
   const OatHeader& oat_header = oat_file->GetOatHeader();
-  bool image_check = ((oat_header.GetImageFileLocationOatChecksum() == image_oat_checksum)
-                      && (oat_header.GetImageFileLocationOatDataBegin() == image_oat_data_begin)
-                      && (oat_header.GetImagePatchDelta() == image_patch_delta));
+  bool ret = ((oat_header.GetImageFileLocationOatChecksum() == image_oat_checksum)
+              && (oat_header.GetImagePatchDelta() == image_patch_delta)
+              && (oat_header.GetImageFileLocationOatDataBegin() == image_oat_data_begin));
+  if (!ret) {
+    *error_msg = StringPrintf("oat file '%s' mismatch (0x%x, %d, %d) with (0x%x, %" PRIdPTR ", %d)",
+                              oat_file->GetLocation().c_str(),
+                              oat_file->GetOatHeader().GetImageFileLocationOatChecksum(),
+                              oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(),
+                              oat_file->GetOatHeader().GetImagePatchDelta(),
+                              image_oat_checksum, image_oat_data_begin, image_patch_delta);
+  }
+  return ret;
+}
+
+bool ClassLinker::VerifyOatAndDexFileChecksums(const OatFile* oat_file,
+                                               const char* dex_location,
+                                               uint32_t dex_location_checksum,
+                                               const InstructionSet instruction_set,
+                                               std::string* error_msg) {
+  if (!VerifyOatChecksums(oat_file, instruction_set, error_msg)) {
+    return false;
+  }
 
   const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location,
                                                                     &dex_location_checksum);
@@ -1039,27 +1072,15 @@
     }
     return false;
   }
-  bool dex_check = dex_location_checksum == oat_dex_file->GetDexFileLocationChecksum();
 
-  if (image_check && dex_check) {
-    return true;
-  }
-
-  if (!image_check) {
-    ScopedObjectAccess soa(Thread::Current());
-    *error_msg = StringPrintf("oat file '%s' mismatch (0x%x, %d) with (0x%x, %" PRIdPTR ")",
-                              oat_file->GetLocation().c_str(),
-                              oat_file->GetOatHeader().GetImageFileLocationOatChecksum(),
-                              oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(),
-                              image_oat_checksum, image_oat_data_begin);
-  }
-  if (!dex_check) {
+  if (dex_location_checksum != oat_dex_file->GetDexFileLocationChecksum()) {
     *error_msg = StringPrintf("oat file '%s' mismatch (0x%x) with '%s' (0x%x)",
                               oat_file->GetLocation().c_str(),
                               oat_dex_file->GetDexFileLocationChecksum(),
                               dex_location, dex_location_checksum);
+    return false;
   }
-  return false;
+  return true;
 }
 
 bool ClassLinker::VerifyOatWithDexFile(const OatFile* oat_file,
@@ -1082,8 +1103,8 @@
     }
     dex_file.reset(oat_dex_file->OpenDexFile(error_msg));
   } else {
-    bool verified = VerifyOatFileChecksums(oat_file, dex_location, dex_location_checksum,
-                                           kRuntimeISA, error_msg);
+    bool verified = VerifyOatAndDexFileChecksums(oat_file, dex_location, dex_location_checksum,
+                                                 kRuntimeISA, error_msg);
     if (!verified) {
       return false;
     }
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 1bb1635..8c09042 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -274,12 +274,18 @@
                            std::vector<const DexFile*>* dex_files)
       LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
 
+  // Returns true if the given oat file has the same image checksum as the image it is paired with.
+  static bool VerifyOatImageChecksum(const OatFile* oat_file, const InstructionSet instruction_set);
+  // Returns true if the oat file checksums match with the image and the offsets are such that it
+  // could be loaded with it.
+  static bool VerifyOatChecksums(const OatFile* oat_file, const InstructionSet instruction_set,
+                                 std::string* error_msg);
   // Returns true if oat file contains the dex file with the given location and checksum.
-  static bool VerifyOatFileChecksums(const OatFile* oat_file,
-                                     const char* dex_location,
-                                     uint32_t dex_location_checksum,
-                                     InstructionSet instruction_set,
-                                     std::string* error_msg);
+  static bool VerifyOatAndDexFileChecksums(const OatFile* oat_file,
+                                           const char* dex_location,
+                                           uint32_t dex_location_checksum,
+                                           InstructionSet instruction_set,
+                                           std::string* error_msg);
 
   // TODO: replace this with multiple methods that allocate the correct managed type.
   template <class T>
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index e5bc7c8..e1a7771 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -951,6 +951,40 @@
   return std::make_pair(tmp, colon_ptr + 1);
 }
 
+std::string DexFile::GetMultiDexClassesDexName(size_t number, const char* dex_location) {
+  if (number == 0) {
+    return dex_location;
+  } else {
+    return StringPrintf("%s" kMultiDexSeparatorString "classes%zu.dex", dex_location, number + 1);
+  }
+}
+
+std::string DexFile::GetDexCanonicalLocation(const char* dex_location) {
+  CHECK_NE(dex_location, static_cast<const char*>(nullptr));
+  char* path = nullptr;
+  if (!IsMultiDexLocation(dex_location)) {
+    path = realpath(dex_location, nullptr);
+  } else {
+    std::pair<const char*, const char*> pair = DexFile::SplitMultiDexLocation(dex_location);
+    const char* dex_real_location(realpath(pair.first, nullptr));
+    delete[] pair.first;  // Allocated with new[] by SplitMultiDexLocation.
+    if (dex_real_location != nullptr) {
+      int length = strlen(dex_real_location) + strlen(pair.second) +
+                   strlen(kMultiDexSeparatorString) + 1;
+      char* multidex_canonical_location = reinterpret_cast<char*>(malloc(sizeof(char) * length));
+      snprintf(multidex_canonical_location, length, "%s" kMultiDexSeparatorString "%s",
+               dex_real_location, pair.second);
+      free(const_cast<char*>(dex_real_location));
+      path = multidex_canonical_location;
+    }
+  }
+
+  // If realpath fails then we just copy the argument.
+  std::string result(path == nullptr ? dex_location : path);
+  free(path);
+  return result;
+}
+
 std::ostream& operator<<(std::ostream& os, const DexFile& dex_file) {
   os << StringPrintf("[DexFile: %s dex-checksum=%08x location-checksum=%08x %p-%p]",
                      dex_file.GetLocation().c_str(),
@@ -958,6 +990,7 @@
                      dex_file.Begin(), dex_file.Begin() + dex_file.Size());
   return os;
 }
+
 std::string Signature::ToString() const {
   if (dex_file_ == nullptr) {
     CHECK(proto_id_ == nullptr);
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index d64a030..2794af6 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -841,6 +841,23 @@
     return size_;
   }
 
+  static std::string GetMultiDexClassesDexName(size_t number, const char* dex_location);
+
+  // Returns the canonical form of the given dex location.
+  //
+  // There are different flavors of "dex locations" as follows:
+  // the file name of a dex file:
+  //     The actual file path that the dex file has on disk.
+  // dex_location:
+  //     This acts as a key for the class linker to know which dex file to load.
+  //     It may correspond to either an old odex file or a particular dex file
+  //     inside an oat file. In the first case it will also match the file name
+  //     of the dex file. In the second case (oat) it will include the file name
+  //     and possibly some multidex annotation to uniquely identify it.
+  // canonical_dex_location:
+  //     The dex_location where its file name part has been made canonical.
+  static std::string GetDexCanonicalLocation(const char* dex_location);
+
  private:
   // Opens a .dex file
   static const DexFile* OpenFile(int fd, const char* location, bool verify, std::string* error_msg);
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 284aa89..fa13290 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -345,4 +345,31 @@
   }
 }
 
+TEST_F(DexFileTest, GetMultiDexClassesDexName) {
+  std::string dex_location_str = "/system/app/framework.jar";
+  const char* dex_location = dex_location_str.c_str();
+  ASSERT_EQ("/system/app/framework.jar", DexFile::GetMultiDexClassesDexName(0, dex_location));
+  ASSERT_EQ("/system/app/framework.jar:classes2.dex", DexFile::GetMultiDexClassesDexName(1, dex_location));
+  ASSERT_EQ("/system/app/framework.jar:classes101.dex", DexFile::GetMultiDexClassesDexName(100, dex_location));
+}
+
+TEST_F(DexFileTest, GetDexCanonicalLocation) {
+  ScratchFile file;
+  std::string dex_location = file.GetFilename();
+
+  ASSERT_EQ(file.GetFilename(), DexFile::GetDexCanonicalLocation(dex_location.c_str()));
+  std::string multidex_location = DexFile::GetMultiDexClassesDexName(1, dex_location.c_str());
+  ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location.c_str()));
+
+  std::string dex_location_sym = dex_location + "symlink";
+  ASSERT_EQ(0, symlink(dex_location.c_str(), dex_location_sym.c_str()));
+
+  ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location_sym.c_str()));
+
+  std::string multidex_location_sym = DexFile::GetMultiDexClassesDexName(1, dex_location_sym.c_str());
+  ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location_sym.c_str()));
+
+  ASSERT_EQ(0, unlink(dex_location_sym.c_str()));
+}
+
 }  // namespace art
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 594c65f..6179b5e 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -837,6 +837,7 @@
     }
   }
 
+  bool reserved = false;
   for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
     Elf32_Phdr& program_header = GetProgramHeader(i);
 
@@ -853,10 +854,8 @@
 
     // Found something to load.
 
-    // If p_vaddr is zero, it must be the first loadable segment,
-    // since they must be in order.  Since it is zero, there isn't a
-    // specific address requested, so first request a contiguous chunk
-    // of required size for all segments, but with no
+    // Before loading the actual segments, reserve a contiguous chunk of the
+    // required size at the required address for all segments, but with no
     // permissions. We'll then carve that up with the proper
     // permissions as we load the actual segments. If p_vaddr is
     // non-zero, the segments require the specific address specified,
@@ -870,18 +869,24 @@
       return false;
     }
     size_t file_length = static_cast<size_t>(temp_file_length);
-    if (program_header.p_vaddr == 0) {
+    if (!reserved) {
+      byte* reserve_base = ((program_header.p_vaddr != 0) ?
+                            reinterpret_cast<byte*>(program_header.p_vaddr) : nullptr);
       std::string reservation_name("ElfFile reservation for ");
       reservation_name += file_->GetPath();
       std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
-                                                     nullptr, GetLoadedSize(), PROT_NONE, false,
-                                                     error_msg));
+                                                           reserve_base,
+                                                           GetLoadedSize(), PROT_NONE, false,
+                                                           error_msg));
       if (reserve.get() == nullptr) {
         *error_msg = StringPrintf("Failed to allocate %s: %s",
                                   reservation_name.c_str(), error_msg->c_str());
         return false;
       }
-      base_address_ = reserve->Begin();
+      reserved = true;
+      if (reserve_base == nullptr) {
+        base_address_ = reserve->Begin();
+      }
       segments_.push_back(reserve.release());
     }
     // empty segment, nothing to map
@@ -1335,7 +1340,8 @@
   const Elf32_Shdr* symtab_sec = all.FindSectionByName(".symtab");
   Elf32_Shdr* text_sec = all.FindSectionByName(".text");
   if (debug_info == nullptr || debug_abbrev == nullptr || eh_frame == nullptr ||
-      debug_str == nullptr || text_sec == nullptr || strtab_sec == nullptr || symtab_sec == nullptr) {
+      debug_str == nullptr || text_sec == nullptr || strtab_sec == nullptr ||
+      symtab_sec == nullptr) {
     return;
   }
   // We need to add in a strtab and symtab to the image.
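
The reservation logic above now runs exactly once per ELF file, whether or not the first PT_LOAD segment carries a fixed p_vaddr: one PROT_NONE region covering the whole loaded size is reserved, and the real segments are then mapped over it with their proper permissions. A standalone sketch of that reserve-then-carve pattern in plain POSIX, independent of the MemMap machinery:

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kPage = 4096;
  const size_t total = 16 * kPage;
  // Reserve the full range with no permissions.
  void* reserve = mmap(nullptr, total, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (reserve == MAP_FAILED) { perror("reserve"); return 1; }
  // Carve out the first four pages with real permissions. MAP_FIXED is
  // safe here because we own the reservation we are overwriting.
  void* segment = mmap(reserve, 4 * kPage, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  if (segment == MAP_FAILED) { perror("carve"); return 1; }
  munmap(reserve, total);
  return 0;
}
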
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 42089af..1d10af2 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -101,10 +101,7 @@
             << "art base address of 0x" << std::hex << ART_BASE_ADDRESS;
   arg_vector.push_back(StringPrintf("--base=0x%x", ART_BASE_ADDRESS + base_offset));
 
-  if (kIsTargetBuild) {
-    arg_vector.push_back("--image-classes-zip=/system/framework/framework.jar");
-    arg_vector.push_back("--image-classes=preloaded-classes");
-  } else {
+  if (!kIsTargetBuild) {
     arg_vector.push_back("--host");
   }
 
@@ -169,7 +166,8 @@
     return true;
 }
 
-bool ImageSpace::RelocateImage(const char* image_location, const char* dest_filename,
+// Relocate the image at image_location into dest_filename, shifting it by a random amount.
+static bool RelocateImage(const char* image_location, const char* dest_filename,
                                InstructionSet isa, std::string* error_msg) {
   std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
 
@@ -356,8 +354,12 @@
       image_lock.Init(image_filename->c_str(), &error_msg);
       LOG(INFO) << "Using image file " << image_filename->c_str() << " for image location "
                 << image_location;
+      // If we are in /system we can assume the image is good. We can also
+      // assume this if we are using a relocated image (i.e. the image
+      // checksum matches), since it differs only by the relocation offset.
+      // We need this to make sure that host tests continue to work.
       space = ImageSpace::Init(image_filename->c_str(), image_location,
-                               !is_system, &error_msg);
+                               !(is_system || relocated_version_used), &error_msg);
     }
     if (space != nullptr) {
       return space;
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index debca52..6be3b8f 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -124,9 +124,6 @@
                           bool validate_oat_file, std::string* error_msg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static bool RelocateImage(const char* image_location, const char* dest_filename,
-                            InstructionSet isa, std::string* error_msg);
-
   OatFile* OpenOatFile(const char* image, std::string* error_msg) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/runtime/globals.h b/runtime/globals.h
index 1d9f22c..107e064 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -118,6 +118,8 @@
 static constexpr TraceClockSource kDefaultTraceClockSource = kTraceClockSourceWall;
 #endif
 
+static constexpr bool kDefaultMustRelocate = true;
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_GLOBALS_H_
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index d5b90f2..43b9912 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -677,13 +677,15 @@
     return soa.AddLocalReference<jclass>(c->GetSuperClass());
   }
 
+  // Note: returns whether java_class1 can safely be cast to
+  // java_class2, and not the other way around.
   static jboolean IsAssignableFrom(JNIEnv* env, jclass java_class1, jclass java_class2) {
     CHECK_NON_NULL_ARGUMENT_RETURN(java_class1, JNI_FALSE);
     CHECK_NON_NULL_ARGUMENT_RETURN(java_class2, JNI_FALSE);
     ScopedObjectAccess soa(env);
     mirror::Class* c1 = soa.Decode<mirror::Class*>(java_class1);
     mirror::Class* c2 = soa.Decode<mirror::Class*>(java_class2);
-    return c1->IsAssignableFrom(c2) ? JNI_TRUE : JNI_FALSE;
+    return c2->IsAssignableFrom(c1) ? JNI_TRUE : JNI_FALSE;
   }
 
   static jboolean IsInstanceOf(JNIEnv* env, jobject jobj, jclass java_class) {
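
With the corrected argument order, IsAssignableFrom(env, clazz1, clazz2) answers whether an object of clazz1 can be used where clazz2 is expected, matching the JNI specification. A small native sketch of the expected behavior; it assumes a JNIEnv* obtained from a thread attached to a running VM:

#include <jni.h>
#include <cassert>

void CheckAssignability(JNIEnv* env) {
  jclass object_class = env->FindClass("java/lang/Object");
  jclass string_class = env->FindClass("java/lang/String");
  // A String may stand in for an Object...
  assert(env->IsAssignableFrom(string_class, object_class) == JNI_TRUE);
  // ...but an arbitrary Object may not stand in for a String.
  assert(env->IsAssignableFrom(object_class, string_class) == JNI_FALSE);
}
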
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 7c7e60c..da3080f 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -950,8 +950,28 @@
   jclass string_class = env_->FindClass("java/lang/String");
   ASSERT_NE(string_class, nullptr);
 
-  ASSERT_TRUE(env_->IsAssignableFrom(object_class, string_class));
-  ASSERT_FALSE(env_->IsAssignableFrom(string_class, object_class));
+  // A superclass is assignable from an instance of its
+  // subclass but not vice versa.
+  ASSERT_TRUE(env_->IsAssignableFrom(string_class, object_class));
+  ASSERT_FALSE(env_->IsAssignableFrom(object_class, string_class));
+
+  jclass charsequence_interface = env_->FindClass("java/lang/CharSequence");
+  ASSERT_NE(charsequence_interface, nullptr);
+
+  // An interface is assignable from an instance of an implementing
+  // class but not vice versa.
+  ASSERT_TRUE(env_->IsAssignableFrom(string_class, charsequence_interface));
+  ASSERT_FALSE(env_->IsAssignableFrom(charsequence_interface, string_class));
+
+  // Check that arrays are covariant.
+  jclass string_array_class = env_->FindClass("[Ljava/lang/String;");
+  ASSERT_NE(string_array_class, nullptr);
+  jclass object_array_class = env_->FindClass("[Ljava/lang/Object;");
+  ASSERT_NE(object_array_class, nullptr);
+  ASSERT_TRUE(env_->IsAssignableFrom(string_array_class, object_array_class));
+  ASSERT_FALSE(env_->IsAssignableFrom(object_array_class, string_array_class));
+
+  // Primitive types are tested in 004-JniTest.
 
   // Null as either class should fail.
   CheckJniAbortCatcher jni_abort_catcher;
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 1074253..6c7ee5b 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -130,8 +130,67 @@
 uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
 #endif
 
+// Return true if the address range is contained in a single /proc/self/maps entry.
+static bool CheckOverlapping(uintptr_t begin,
+                             uintptr_t end,
+                             std::string* error_msg) {
+  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
+  if (!map->Build()) {
+    *error_msg = StringPrintf("Failed to build process map");
+    return false;
+  }
+  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
+    if ((begin >= it->start && begin < it->end)  // start of new within old
+        && (end > it->start && end <= it->end)) {  // end of new within old
+      return true;
+    }
+  }
+  std::string maps;
+  ReadFileToString("/proc/self/maps", &maps);
+  *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
+                            "any existing map:\n%s\n",
+                            begin, end, maps.c_str());
+  return false;
+}
+
+// Return true if the address range does not conflict with any /proc/self/maps entry.
+static bool CheckNonOverlapping(uintptr_t begin,
+                                uintptr_t end,
+                                std::string* error_msg) {
+  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
+  if (!map->Build()) {
+    *error_msg = StringPrintf("Failed to build process map");
+    return false;
+  }
+  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
+    if ((begin >= it->start && begin < it->end)      // start of new within old
+        || (end > it->start && end < it->end)        // end of new within old
+        || (begin <= it->start && end > it->end)) {  // start/end of new includes all of old
+      std::ostringstream map_info;
+      map_info << std::make_pair(it, map->end());
+      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
+                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
+                                begin, end,
+                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
+                                it->name.c_str(),
+                                map_info.str().c_str());
+      return false;
+    }
+  }
+  return true;
+}
+
+// CheckMapRequest validates a non-MAP_FAILED mmap result against the
+// expected address, calling munmap and reporting the reason in
+// error_msg if validation fails.
+//
+// If expected_ptr is nullptr, nothing is checked beyond the fact
+// that actual_ptr is not MAP_FAILED. However, if expected_ptr is
+// non-null, we check that actual_ptr == expected_ptr, and if not,
+// report in error_msg the conflicting mapping if one was found, or a
+// generic error otherwise.
 static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
-                            std::ostringstream* error_msg) {
+                            std::string* error_msg) {
   // Handled first by caller for more specific error messages.
   CHECK(actual_ptr != MAP_FAILED);
 
@@ -139,6 +198,10 @@
     return true;
   }
 
+  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
+  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
+  uintptr_t limit = expected + byte_count;
+
   if (expected_ptr == actual_ptr) {
     return true;
   }
@@ -149,40 +212,19 @@
     PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
   }
 
-  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
-  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
-  uintptr_t limit = expected + byte_count;
-
-  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
-  if (!map->Build()) {
-    *error_msg << StringPrintf("Failed to build process map to determine why mmap returned "
-                               "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
-
+  if (!CheckNonOverlapping(expected, limit, error_msg)) {
     return false;
   }
-  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
-    if ((expected >= it->start && expected < it->end)  // start of new within old
-        || (limit > it->start && limit < it->end)      // end of new within old
-        || (expected <= it->start && limit > it->end)) {  // start/end of new includes all of old
-      *error_msg
-          << StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
-                          "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n",
-                          expected, limit,
-                          static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
-                          it->name.c_str())
-          << std::make_pair(it, map->end());
-      return false;
-    }
-  }
-  *error_msg << StringPrintf("Failed to mmap at expected address, mapped at "
-                             "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
+
+  *error_msg = StringPrintf("Failed to mmap at expected address, mapped at "
+                            "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
   return false;
 }
 
-MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count, int prot,
+MemMap* MemMap::MapAnonymous(const char* name, byte* expected_ptr, size_t byte_count, int prot,
                              bool low_4gb, std::string* error_msg) {
   if (byte_count == 0) {
-    return new MemMap(name, nullptr, 0, nullptr, 0, prot);
+    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
   }
   size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
 
@@ -222,11 +264,11 @@
   // 4GB.
   if (low_4gb && (
       // Start out of bounds.
-      (reinterpret_cast<uintptr_t>(expected) >> 32) != 0 ||
+      (reinterpret_cast<uintptr_t>(expected_ptr) >> 32) != 0 ||
       // End out of bounds. For simplicity, this will fail for the last page of memory.
-      (reinterpret_cast<uintptr_t>(expected + page_aligned_byte_count) >> 32) != 0)) {
+      (reinterpret_cast<uintptr_t>(expected_ptr + page_aligned_byte_count) >> 32) != 0)) {
     *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
-                              expected, expected + page_aligned_byte_count);
+                              expected_ptr, expected_ptr + page_aligned_byte_count);
     return nullptr;
   }
 #endif
@@ -238,7 +280,7 @@
 #if USE_ART_LOW_4G_ALLOCATOR
   // MAP_32BIT only available on x86_64.
   void* actual = MAP_FAILED;
-  if (low_4gb && expected == nullptr) {
+  if (low_4gb && expected_ptr == nullptr) {
     bool first_run = true;
 
     for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
@@ -294,18 +336,18 @@
       saved_errno = ENOMEM;
     }
   } else {
-    actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
+    actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
     saved_errno = errno;
   }
 
 #else
 #if defined(__LP64__)
-  if (low_4gb && expected == nullptr) {
+  if (low_4gb && expected_ptr == nullptr) {
     flags |= MAP_32BIT;
   }
 #endif
 
-  void* actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
+  void* actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
   saved_errno = errno;
 #endif
 
@@ -314,44 +356,51 @@
     ReadFileToString("/proc/self/maps", &maps);
 
     *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s\n%s",
-                              expected, page_aligned_byte_count, prot, flags, fd.get(),
+                              expected_ptr, page_aligned_byte_count, prot, flags, fd.get(),
                               strerror(saved_errno), maps.c_str());
     return nullptr;
   }
   std::ostringstream check_map_request_error_msg;
-  if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
-    *error_msg = check_map_request_error_msg.str();
+  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
     return nullptr;
   }
   return new MemMap(name, reinterpret_cast<byte*>(actual), byte_count, actual,
-                    page_aligned_byte_count, prot);
+                    page_aligned_byte_count, prot, false);
 }
 
-MemMap* MemMap::MapFileAtAddress(byte* expected, size_t byte_count, int prot, int flags, int fd,
+MemMap* MemMap::MapFileAtAddress(byte* expected_ptr, size_t byte_count, int prot, int flags, int fd,
                                  off_t start, bool reuse, const char* filename,
                                  std::string* error_msg) {
   CHECK_NE(0, prot);
   CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
+  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
+  uintptr_t limit = expected + byte_count;
   if (reuse) {
     // reuse means it is okay that it overlaps an existing page mapping.
     // Only use this if you actually made the page reservation yourself.
-    CHECK(expected != nullptr);
+    CHECK(expected_ptr != nullptr);
+    if (!CheckOverlapping(expected, limit, error_msg)) {
+      return nullptr;
+    }
     flags |= MAP_FIXED;
   } else {
     CHECK_EQ(0, flags & MAP_FIXED);
+    if (expected_ptr != nullptr && !CheckNonOverlapping(expected, limit, error_msg)) {
+      return nullptr;
+    }
   }
 
   if (byte_count == 0) {
-    return new MemMap(filename, nullptr, 0, nullptr, 0, prot);
+    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
   }
   // Adjust 'offset' to be page-aligned as required by mmap.
   int page_offset = start % kPageSize;
   off_t page_aligned_offset = start - page_offset;
   // Adjust 'byte_count' to be page-aligned as we will map this anyway.
   size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
-  // The 'expected' is modified (if specified, ie non-null) to be page aligned to the file but not
-  // necessarily to virtual memory. mmap will page align 'expected' for us.
-  byte* page_aligned_expected = (expected == nullptr) ? nullptr : (expected - page_offset);
+  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page aligned to the file
+  // but not necessarily to virtual memory. mmap will page align 'expected_ptr' for us.
+  byte* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
 
   byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_expected,
                                               page_aligned_byte_count,
@@ -373,21 +422,22 @@
     return nullptr;
   }
   std::ostringstream check_map_request_error_msg;
-  if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
-    *error_msg = check_map_request_error_msg.str();
+  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
     return nullptr;
   }
   return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
-                    prot);
+                    prot, reuse);
 }
 
 MemMap::~MemMap() {
   if (base_begin_ == nullptr && base_size_ == 0) {
     return;
   }
-  int result = munmap(base_begin_, base_size_);
-  if (result == -1) {
-    PLOG(FATAL) << "munmap failed";
+  if (!reuse_) {
+    int result = munmap(base_begin_, base_size_);
+    if (result == -1) {
+      PLOG(FATAL) << "munmap failed";
+    }
   }
 
   // Remove it from maps_.
@@ -405,9 +455,9 @@
 }
 
 MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
-               size_t base_size, int prot)
+               size_t base_size, int prot, bool reuse)
     : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
-      prot_(prot) {
+      prot_(prot), reuse_(reuse) {
   if (size_ == 0) {
     CHECK(begin_ == nullptr);
     CHECK(base_begin_ == nullptr);
@@ -437,7 +487,7 @@
   byte* new_base_end = new_end;
   DCHECK_LE(new_base_end, old_base_end);
   if (new_base_end == old_base_end) {
-    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot);
+    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
   }
   size_ = new_end - reinterpret_cast<byte*>(begin_);
   base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
@@ -489,7 +539,7 @@
                               maps.c_str());
     return nullptr;
   }
-  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
+  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
 }
 
 void MemMap::MadviseDontNeedAndZero() {
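
CheckOverlapping and CheckNonOverlapping above both reduce to half-open interval arithmetic over /proc/self/maps entries. A self-contained sketch of the underlying predicates on [begin, end) ranges; the three-clause test in CheckNonOverlapping enumerates sub-cases of the compact overlap form shown here:

#include <cassert>
#include <cstdint>

// Half-open address range [begin, end), as used by the checks above.
struct Range { uintptr_t begin, end; };

// True when 'inner' lies entirely within 'outer' (CheckOverlapping).
bool ContainedIn(Range inner, Range outer) {
  return inner.begin >= outer.begin && inner.end <= outer.end;
}

// True when the two ranges share at least one address
// (the conflict that CheckNonOverlapping reports).
bool Overlaps(Range a, Range b) {
  return a.begin < b.end && b.begin < a.end;
}

int main() {
  Range old_map{0x1000, 0x3000};
  assert(ContainedIn({0x1000, 0x2000}, old_map));
  assert(Overlaps({0x2000, 0x4000}, old_map));
  assert(!Overlaps({0x3000, 0x4000}, old_map));  // half-open: touching is fine
  return 0;
}
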
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index defa6a5..872c63b 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -73,7 +73,9 @@
 
   // Map part of a file, taking care of non-page aligned offsets.  The
   // "start" offset is absolute, not relative. This version allows
-  // requesting a specific address for the base of the mapping.
+  // requesting a specific address for the base of the
+  // mapping. "reuse" allows us to create a view into an existing
+  // mapping where we do not take ownership of the memory.
   //
 // On success, returns a MemMap instance.  On failure, returns NULL.
   static MemMap* MapFileAtAddress(byte* addr, size_t byte_count, int prot, int flags, int fd,
@@ -134,7 +136,7 @@
 
  private:
   MemMap(const std::string& name, byte* begin, size_t size, void* base_begin, size_t base_size,
-         int prot) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+         int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
 
   static void DumpMaps(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps)
       LOCKS_EXCLUDED(Locks::mem_maps_lock_);
@@ -145,7 +147,7 @@
   static MemMap* GetLargestMemMapAt(void* address)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
 
-  std::string name_;
+  const std::string name_;
   byte* const begin_;  // Start of data.
   size_t size_;  // Length of data.
 
@@ -153,6 +155,11 @@
   size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
   int prot_;  // Protection of the map.
 
+  // When reuse_ is true, this is just a view of an existing mapping;
+  // we do not own the memory and are not responsible for unmapping it.
+  const bool reuse_;
+
 #if USE_ART_LOW_4G_ALLOCATOR
   static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
 #endif
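
The new reuse_ flag turns a MemMap into a non-owning view: the destructor skips munmap, so destroying the view leaves the underlying reservation intact for its real owner. A toy sketch of that ownership rule (ToyMap is illustrative, not the real class):

#include <sys/mman.h>
#include <cstddef>

class ToyMap {
 public:
  ToyMap(void* base, size_t size, bool reuse)
      : base_(base), size_(size), reuse_(reuse) {}
  ~ToyMap() {
    if (!reuse_) {         // only the owner unmaps
      munmap(base_, size_);
    }
  }
 private:
  void* const base_;
  const size_t size_;
  const bool reuse_;       // view into an existing mapping; do not unmap
};

int main() {
  void* base = mmap(nullptr, 4096, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  {
    ToyMap view(base, 4096, /*reuse=*/true);   // destructor is a no-op
  }
  ToyMap owner(base, 4096, /*reuse=*/false);   // unmaps at scope exit
  return 0;
}
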
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index ac1a310..0af2c22 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -275,8 +275,96 @@
   }
 }
 
-static jboolean IsDexOptNeededInternal(JNIEnv* env, const char* filename,
+// Java: dalvik.system.DexFile.UP_TO_DATE
+static const jbyte kUpToDate = 0;
+// Java: dalvik.system.DexFile.PATCHOAT_NEEDED
+static const jbyte kPatchoatNeeded = 1;
+// Java: dalvik.system.DexFile.DEXOPT_NEEDED
+static const jbyte kDexoptNeeded = 2;
+
+template <bool kVerboseLogging, bool kReasonLogging>
+static jbyte IsDexOptNeededForFile(const std::string& oat_filename, const char* filename,
+                                   InstructionSet target_instruction_set) {
+  std::string error_msg;
+  std::unique_ptr<const OatFile> oat_file(OatFile::Open(oat_filename, oat_filename, nullptr,
+                                                        false, &error_msg));
+  if (oat_file.get() == nullptr) {
+    if (kVerboseLogging) {
+      LOG(INFO) << "DexFile_isDexOptNeeded failed to open oat file '" << oat_filename
+          << "' for file location '" << filename << "': " << error_msg;
+    }
+    error_msg.clear();
+    return kDexoptNeeded;
+  }
+  bool should_relocate_if_possible = Runtime::Current()->ShouldRelocate();
+  uint32_t location_checksum = 0;
+  const art::OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(filename, nullptr,
+                                                                          kReasonLogging);
+  if (oat_dex_file != nullptr) {
+    // If it's not possible to read the classes.dex, assume up-to-date as we won't be able to
+    // compile it anyway.
+    if (!DexFile::GetChecksum(filename, &location_checksum, &error_msg)) {
+      if (kVerboseLogging) {
+        LOG(INFO) << "DexFile_isDexOptNeeded found precompiled stripped file: "
+            << filename << " for " << oat_filename << ": " << error_msg;
+      }
+      if (ClassLinker::VerifyOatChecksums(oat_file.get(), target_instruction_set, &error_msg)) {
+        if (kVerboseLogging) {
+          LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+                    << " is up-to-date for " << filename;
+        }
+        return kUpToDate;
+      } else if (should_relocate_if_possible &&
+                  ClassLinker::VerifyOatImageChecksum(oat_file.get(), target_instruction_set)) {
+        if (kVerboseLogging) {
+          LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+                    << " needs to be relocated for " << filename;
+        }
+        return kPatchoatNeeded;
+      } else {
+        if (kVerboseLogging) {
+          LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+                    << " is out of date for " << filename;
+        }
+        return kDexoptNeeded;
+      }
+      // If we get here the file is out of date and we should use the system one to relocate.
+    } else {
+      if (ClassLinker::VerifyOatAndDexFileChecksums(oat_file.get(), filename, location_checksum,
+                                                    target_instruction_set, &error_msg)) {
+        if (kVerboseLogging) {
+          LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+                    << " is up-to-date for " << filename;
+        }
+        return kUpToDate;
+      } else if (location_checksum == oat_dex_file->GetDexFileLocationChecksum()
+                  && should_relocate_if_possible
+                  && ClassLinker::VerifyOatImageChecksum(oat_file.get(), target_instruction_set)) {
+        if (kVerboseLogging) {
+          LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+                    << " needs to be relocated for " << filename;
+        }
+        return kPatchoatNeeded;
+      } else {
+        if (kVerboseLogging) {
+          LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+                    << " is out of date for " << filename;
+        }
+        return kDexoptNeeded;
+      }
+    }
+  } else {
+    if (kVerboseLogging) {
+      LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+                << " does not contain " << filename;
+    }
+    return kDexoptNeeded;
+  }
+}
+
+static jbyte IsDexOptNeededInternal(JNIEnv* env, const char* filename,
     const char* pkgname, const char* instruction_set, const jboolean defer) {
+  // TODO: Disable this logging.
   const bool kVerboseLogging = false;  // Spammy logging.
   const bool kReasonLogging = true;  // Logging of reason for returning JNI_TRUE.
 
@@ -285,7 +373,7 @@
     ScopedLocalRef<jclass> fnfe(env, env->FindClass("java/io/FileNotFoundException"));
     const char* message = (filename == nullptr) ? "<empty file name>" : filename;
     env->ThrowNew(fnfe.get(), message);
-    return JNI_FALSE;
+    return kUpToDate;
   }
 
   // Always treat elements of the bootclasspath as up-to-date.  The
@@ -301,78 +389,45 @@
       if (kVerboseLogging) {
         LOG(INFO) << "DexFile_isDexOptNeeded ignoring boot class path file: " << filename;
       }
-      return JNI_FALSE;
+      return kUpToDate;
     }
   }
 
-  const InstructionSet target_instruction_set = GetInstructionSetFromString(instruction_set);
-
-  // Check if we have an odex file next to the dex file.
-  std::string odex_filename(DexFilenameToOdexFilename(filename, kRuntimeISA));
-  std::string error_msg;
-  std::unique_ptr<const OatFile> oat_file(OatFile::Open(odex_filename, odex_filename, NULL, false,
-                                                        &error_msg));
-  if (oat_file.get() == nullptr) {
-    if (kVerboseLogging) {
-      LOG(INFO) << "DexFile_isDexOptNeeded failed to open oat file '" << filename
-          << "': " << error_msg;
-    }
-    error_msg.clear();
-  } else {
-    const art::OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(filename, NULL,
-                                                                           kReasonLogging);
-    if (oat_dex_file != nullptr) {
-      uint32_t location_checksum;
-      // If its not possible to read the classes.dex assume up-to-date as we won't be able to
-      // compile it anyway.
-      if (!DexFile::GetChecksum(filename, &location_checksum, &error_msg)) {
-        if (kVerboseLogging) {
-          LOG(INFO) << "DexFile_isDexOptNeeded ignoring precompiled stripped file: "
-              << filename << ": " << error_msg;
-        }
-        return JNI_FALSE;
-      }
-      if (ClassLinker::VerifyOatFileChecksums(oat_file.get(), filename, location_checksum,
-                                              target_instruction_set,
-                                              &error_msg)) {
-        if (kVerboseLogging) {
-          LOG(INFO) << "DexFile_isDexOptNeeded precompiled file " << odex_filename
-              << " has an up-to-date checksum compared to " << filename;
-        }
-        return JNI_FALSE;
-      } else {
-        if (kVerboseLogging) {
-          LOG(INFO) << "DexFile_isDexOptNeeded found precompiled file " << odex_filename
-              << " with an out-of-date checksum compared to " << filename
-              << ": " << error_msg;
-        }
-        error_msg.clear();
-      }
-    }
-  }
+  bool force_system_only = false;
+  bool require_system_version = false;
 
   // Check the profile file.  We need to rerun dex2oat if the profile has changed significantly
   // since the last time, or it's new.
   // If the 'defer' argument is true then this will be retried later.  In this case we
   // need to make sure that the profile file copy is not made so that we will get the
   // same result second time.
+  std::string profile_file;
+  std::string prev_profile_file;
+  bool should_copy_profile = false;
   if (Runtime::Current()->GetProfilerOptions().IsEnabled() && (pkgname != nullptr)) {
-    const std::string profile_file = GetDalvikCacheOrDie("profiles", false /* create_if_absent */)
+    profile_file = GetDalvikCacheOrDie("profiles", false /* create_if_absent */)
         + std::string("/") + pkgname;
-    const std::string prev_profile_file = profile_file + std::string("@old");
+    prev_profile_file = profile_file + std::string("@old");
 
     struct stat profstat, prevstat;
     int e1 = stat(profile_file.c_str(), &profstat);
+    int e1_errno = errno;
     int e2 = stat(prev_profile_file.c_str(), &prevstat);
+    int e2_errno = errno;
     if (e1 < 0) {
-      // No profile file, need to run dex2oat
-      if (kReasonLogging) {
-        LOG(INFO) << "DexFile_isDexOptNeeded profile file " << profile_file << " doesn't exist";
+      if (e1_errno != EACCES) {
+        // No profile file, need to run dex2oat, unless we find a file in system.
+        if (kReasonLogging) {
+          LOG(INFO) << "DexFile_isDexOptNeededInternal profile file " << profile_file
+                    << " doesn't exist. Will check odex to see if we can find a working version.";
+        }
+        // Force it to only accept system files/files with versions in system.
+        require_system_version = true;
+      } else {
+        LOG(INFO) << "DexFile_isDexOptNeededInternal recieved EACCES trying to stat profile file "
+                  << profile_file;
       }
-      return JNI_TRUE;
-    }
-
-    if (e2 == 0) {
+    } else if (e2 == 0) {
       // There is a previous profile file.  Check if the profile has changed significantly.
       // A change in profile is considered significant if X% (change_thr property) of the top K%
       // (compile_thr property) samples has changed.
@@ -384,7 +439,7 @@
       bool old_ok = old_profile.LoadFile(prev_profile_file);
       if (!new_ok || !old_ok) {
         if (kVerboseLogging) {
-          LOG(INFO) << "DexFile_isDexOptNeeded Ignoring invalid profiles: "
+          LOG(INFO) << "DexFile_isDexOptNeededInternal Ignoring invalid profiles: "
                     << (new_ok ?  "" : profile_file) << " " << (old_ok ? "" : prev_profile_file);
         }
       } else {
@@ -393,7 +448,7 @@
         old_profile.GetTopKSamples(old_top_k, top_k_threshold);
         if (new_top_k.empty()) {
           if (kVerboseLogging) {
-            LOG(INFO) << "DexFile_isDexOptNeeded empty profile: " << profile_file;
+            LOG(INFO) << "DexFile_isDexOptNeededInternal empty profile: " << profile_file;
           }
           // If the new topK is empty we shouldn't optimize so we leave the change_percent at 0.0.
         } else {
@@ -405,7 +460,7 @@
           if (kVerboseLogging) {
             std::set<std::string>::iterator end = diff.end();
             for (std::set<std::string>::iterator it = diff.begin(); it != end; it++) {
-              LOG(INFO) << "DexFile_isDexOptNeeded new in topK: " << *it;
+              LOG(INFO) << "DexFile_isDexOptNeededInternal new in topK: " << *it;
             }
           }
         }
@@ -413,67 +468,84 @@
 
       if (change_percent > change_threshold) {
         if (kReasonLogging) {
-          LOG(INFO) << "DexFile_isDexOptNeeded size of new profile file " << profile_file <<
+          LOG(INFO) << "DexFile_isDexOptNeededInternal size of new profile file " << profile_file <<
           " is significantly different from old profile file " << prev_profile_file << " (top "
           << top_k_threshold << "% samples changed in proportion of " << change_percent << "%)";
         }
-        if (!defer) {
-          CopyProfileFile(profile_file.c_str(), prev_profile_file.c_str());
-        }
-        return JNI_TRUE;
+        should_copy_profile = !defer;
+        // Force us to only accept system files.
+        force_system_only = true;
       }
-    } else {
+    } else if (e2_errno == ENOENT) {
       // Previous profile does not exist.  Make a copy of the current one.
       if (kVerboseLogging) {
-        LOG(INFO) << "DexFile_isDexOptNeeded previous profile doesn't exist: " << prev_profile_file;
+        LOG(INFO) << "DexFile_isDexOptNeededInternal previous profile doesn't exist: " << prev_profile_file;
       }
-      if (!defer) {
-        CopyProfileFile(profile_file.c_str(), prev_profile_file.c_str());
-      }
+      should_copy_profile = !defer;
+    } else {
+      PLOG(INFO) << "Unable to stat previous profile file " << prev_profile_file;
     }
   }
 
-  // Check if we have an oat file in the cache
-  const std::string cache_dir(GetDalvikCacheOrDie(instruction_set));
-  const std::string cache_location(
-      GetDalvikCacheFilenameOrDie(filename, cache_dir.c_str()));
-  oat_file.reset(OatFile::Open(cache_location, filename, NULL, false, &error_msg));
-  if (oat_file.get() == nullptr) {
-    if (kReasonLogging) {
-      LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
-          << " does not exist for " << filename << ": " << error_msg;
+  const InstructionSet target_instruction_set = GetInstructionSetFromString(instruction_set);
+
+  // Get the filename for the odex file next to the dex file.
+  std::string odex_filename(DexFilenameToOdexFilename(filename, target_instruction_set));
+  // Get the filename for the dalvik-cache file.
+  std::string cache_dir;
+  bool have_android_data = false;
+  bool dalvik_cache_exists = false;
+  GetDalvikCache(instruction_set, false, &cache_dir, &have_android_data, &dalvik_cache_exists);
+  std::string cache_filename;  // was cache_location
+  bool have_cache_filename = false;
+  if (dalvik_cache_exists) {
+    std::string error_msg;
+    have_cache_filename = GetDalvikCacheFilename(filename, cache_dir.c_str(), &cache_filename,
+                                                 &error_msg);
+    if (!have_cache_filename && kVerboseLogging) {
+      LOG(INFO) << "DexFile_isDexOptNeededInternal failed to find cache file for dex file " << filename
+                << ": " << error_msg;
     }
-    return JNI_TRUE;
   }
 
-  uint32_t location_checksum;
-  if (!DexFile::GetChecksum(filename, &location_checksum, &error_msg)) {
-    if (kReasonLogging) {
-      LOG(ERROR) << "DexFile_isDexOptNeeded failed to compute checksum of " << filename
-            << " (error " << error_msg << ")";
+  bool should_relocate_if_possible = Runtime::Current()->ShouldRelocate();
+
+  InstructionSet isa = Runtime::Current()->GetInstructionSet();
+  jbyte dalvik_cache_decision = -1;
+  // Try the cache first, since that's where the relocated versions
+  // will be and where we want to load from.
+  if (have_cache_filename && !force_system_only) {
+    // We can use the dalvik-cache if we find a good file.
+    dalvik_cache_decision =
+        IsDexOptNeededForFile<kVerboseLogging, kReasonLogging>(cache_filename, filename, isa);
+    // We will only return DexOptNeeded if both the cache and system return it.
+    if (dalvik_cache_decision != kDexoptNeeded && !require_system_version) {
+      CHECK(!(dalvik_cache_decision == kPatchoatNeeded && !should_relocate_if_possible))
+          << "May not return PatchoatNeeded when patching is disabled.";
+      return dalvik_cache_decision;
     }
-    return JNI_TRUE;
+    // We couldn't find a usable version in the cache, so now try the system.
   }
 
-  if (!ClassLinker::VerifyOatFileChecksums(oat_file.get(), filename, location_checksum,
-                                           target_instruction_set, &error_msg)) {
-    if (kReasonLogging) {
-      LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
-          << " has out-of-date checksum compared to " << filename
-          << " (error " << error_msg << ")";
-    }
-    return JNI_TRUE;
+  jbyte system_decision =
+      IsDexOptNeededForFile<kVerboseLogging, kReasonLogging>(odex_filename, filename, isa);
+  CHECK(!(system_decision == kPatchoatNeeded && !should_relocate_if_possible))
+      << "May not return PatchoatNeeded when patching is disabled.";
+
+  if (require_system_version && system_decision == kPatchoatNeeded
+                             && dalvik_cache_decision == kUpToDate) {
+    // We have a version from system relocated to the cache. Return it.
+    return dalvik_cache_decision;
   }
 
-  if (kVerboseLogging) {
-    LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
-              << " is up-to-date for " << filename;
+  if (should_copy_profile && system_decision == kDexoptNeeded) {
+    CopyProfileFile(profile_file.c_str(), prev_profile_file.c_str());
   }
-  CHECK(error_msg.empty()) << error_msg;
-  return JNI_FALSE;
+
+  return system_decision;
 }
 
-static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring javaFilename,
+static jbyte DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring javaFilename,
     jstring javaPkgname, jstring javaInstructionSet, jboolean defer) {
   ScopedUtfChars filename(env, javaFilename);
   NullableScopedUtfChars pkgname(env, javaPkgname);
@@ -487,8 +559,8 @@
 static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename) {
   const char* instruction_set = GetInstructionSetString(kRuntimeISA);
   ScopedUtfChars filename(env, javaFilename);
-  return IsDexOptNeededInternal(env, filename.c_str(), nullptr /* pkgname */,
-                                instruction_set, false /* defer */);
+  return kUpToDate != IsDexOptNeededInternal(env, filename.c_str(), nullptr /* pkgname */,
+                                             instruction_set, false /* defer */);
 }
 
 
@@ -497,7 +569,7 @@
   NATIVE_METHOD(DexFile, defineClassNative, "(Ljava/lang/String;Ljava/lang/ClassLoader;J)Ljava/lang/Class;"),
   NATIVE_METHOD(DexFile, getClassNameList, "(J)[Ljava/lang/String;"),
   NATIVE_METHOD(DexFile, isDexOptNeeded, "(Ljava/lang/String;)Z"),
-  NATIVE_METHOD(DexFile, isDexOptNeededInternal, "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)Z"),
+  NATIVE_METHOD(DexFile, isDexOptNeededInternal, "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)B"),
   NATIVE_METHOD(DexFile, openDexFile, "(Ljava/lang/String;Ljava/lang/String;I)J"),
 };
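
The rewritten IsDexOptNeededInternal folds a dalvik-cache probe and a /system odex probe into one three-valued answer. The combination rule can be stated in isolation; this is an illustrative reduction under assumed names, not the exact control flow:

#include <cstdint>

// Mirrors kUpToDate / kPatchoatNeeded / kDexoptNeeded above.
enum class DexOptResult : int8_t { kUpToDate, kPatchoatNeeded, kDexoptNeeded };

// 'cache_probed' stands for having a cache filename and not being
// forced to system; 'require_system_version' is the missing-profile
// case. Pass cache = kDexoptNeeded when the cache was never probed.
DexOptResult Combine(bool cache_probed, DexOptResult cache,
                     DexOptResult system, bool require_system_version) {
  // A usable (or merely relocatable) cache file wins outright, unless
  // a system-backed version is explicitly required.
  if (cache_probed && cache != DexOptResult::kDexoptNeeded &&
      !require_system_version) {
    return cache;
  }
  // An up-to-date relocated copy in the cache satisfies the system
  // requirement even when the system file itself still needs patchoat.
  if (require_system_version && system == DexOptResult::kPatchoatNeeded &&
      cache == DexOptResult::kUpToDate) {
    return cache;
  }
  return system;
}
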
 
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index 496a1b2..fb708a2 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -38,6 +38,7 @@
 }
 
 static void Runtime_nativeExit(JNIEnv*, jclass, jint status) {
+  LOG(INFO) << "System.exit called, status: " << status;
   Runtime::Current()->CallExitHook(status);
   exit(status);
 }
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index f9cc36a..c4c6b10 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -18,6 +18,7 @@
 
 #include <dlfcn.h>
 #include <sstream>
+#include <string.h>
 
 #include "base/bit_vector.h"
 #include "base/stl_util.h"
@@ -125,6 +126,9 @@
 }
 
 OatFile::~OatFile() {
+  for (auto it : oat_dex_files_) {
+    delete[] it.first.data();
+  }
   STLDeleteValues(&oat_dex_files_);
   if (dlopen_handle_ != NULL) {
     dlclose(dlopen_handle_);
@@ -305,8 +309,14 @@
                                               dex_file_checksum,
                                               dex_file_pointer,
                                               methods_offsets_pointer);
-    // Use a StringPiece backed by the oat_dex_file's internal std::string as the key.
-    StringPiece key(oat_dex_file->GetDexFileLocation());
+
+    std::string dex_canonical_location_str =
+        DexFile::GetDexCanonicalLocation(dex_file_location.c_str());
+    // Make a copy since we need to persist it as a key in the object's field.
+    size_t location_size = dex_canonical_location_str.size() + 1;
+    char* dex_canonical_location = new char[location_size];
+    strncpy(dex_canonical_location, dex_canonical_location_str.c_str(), location_size);
+
+    StringPiece key(dex_canonical_location);
     oat_dex_files_.Put(key, oat_dex_file);
   }
   return true;
@@ -329,7 +339,9 @@
 const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
                                                   const uint32_t* dex_location_checksum,
                                                   bool warn_if_not_found) const {
-  Table::const_iterator it = oat_dex_files_.find(dex_location);
+  std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
+
+  Table::const_iterator it = oat_dex_files_.find(dex_canonical_location);
   if (it != oat_dex_files_.end()) {
     const OatFile::OatDexFile* oat_dex_file = it->second;
     if (dex_location_checksum == NULL ||
@@ -344,15 +356,18 @@
       checksum = StringPrintf("0x%08x", *dex_location_checksum);
     }
     LOG(WARNING) << "Failed to find OatDexFile for DexFile " << dex_location
+                 << " ( canonical path " << dex_canonical_location << ")"
                  << " with checksum " << checksum << " in OatFile " << GetLocation();
     if (kIsDebugBuild) {
       for (Table::const_iterator it = oat_dex_files_.begin(); it != oat_dex_files_.end(); ++it) {
         LOG(WARNING) << "OatFile " << GetLocation()
                      << " contains OatDexFile " << it->second->GetDexFileLocation()
+                     << " (canonical path " << it->first << ")"
                      << " with checksum 0x" << std::hex << it->second->GetDexFileLocationChecksum();
       }
     }
   }
+
   return NULL;
 }
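
Because StringPiece does not own its characters, the canonical-location keys above are heap-allocated copies that the destructor must free before STLDeleteValues tears down the values. A reduced sketch of that owned-key pattern, using a plain std::map in place of the real table:

#include <cstring>
#include <map>
#include <string>

int main() {
  // Comparator over the raw C strings, since the pointers themselves
  // would otherwise be compared by address.
  auto cmp = [](const char* a, const char* b) { return std::strcmp(a, b) < 0; };
  std::map<const char*, int, decltype(cmp)> table(cmp);

  std::string canonical = "/data/app/example.apk";   // hypothetical location
  char* key = new char[canonical.size() + 1];
  std::memcpy(key, canonical.c_str(), canonical.size() + 1);
  table.emplace(key, 42);

  // Mirrors the loop in ~OatFile: the container never frees its keys.
  for (auto& entry : table) {
    delete[] entry.first;
  }
  return 0;
}
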
 
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index 668ed9e..3dbe26f 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -48,8 +48,6 @@
   std::string native_bridge_library_string_;
   CompilerCallbacks* compiler_callbacks_;
   bool is_zygote_;
-  // TODO Change this to true when we want it on by default.
-  static constexpr bool kDefaultMustRelocate = false;
   bool must_relocate_;
   std::string patchoat_executable_;
   bool interpreter_only_;
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 982553d..23b9aed 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -48,6 +48,11 @@
   kIntrinsicMinMaxFloat,
   kIntrinsicMinMaxDouble,
   kIntrinsicSqrt,
+  kIntrinsicCeil,
+  kIntrinsicFloor,
+  kIntrinsicRint,
+  kIntrinsicRoundFloat,
+  kIntrinsicRoundDouble,
   kIntrinsicGet,
   kIntrinsicCharAt,
   kIntrinsicCompareTo,
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 52cdcc1..4d49809 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1236,7 +1236,7 @@
     return false;
   }
   std::string cache_file(&location[1]);  // skip leading slash
-  if (!EndsWith(location, ".dex") && !EndsWith(location, ".art")) {
+  if (!EndsWith(location, ".dex") && !EndsWith(location, ".art") && !EndsWith(location, ".oat")) {
     cache_file += "/";
     cache_file += DexFile::kClassesDex;
   }
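
With ".oat" added to the pass-through list, boot.oat keeps its own name in the dalvik cache rather than having classes.dex appended, matching the new test expectation below. A sketch of the overall naming rule; this reorders the real steps (replace first, then append) but produces the same strings:

#include <algorithm>
#include <string>

std::string CacheName(const std::string& location, const std::string& cache_dir) {
  std::string file = location.substr(1);  // skip the leading slash
  std::replace(file.begin(), file.end(), '/', '@');
  auto ends_with = [&file](const std::string& suffix) {
    return file.size() >= suffix.size() &&
           file.compare(file.size() - suffix.size(), suffix.size(), suffix) == 0;
  };
  if (!ends_with(".dex") && !ends_with(".art") && !ends_with(".oat")) {
    file += "@classes.dex";
  }
  return cache_dir + "/" + file;
}

// CacheName("/system/framework/boot.oat", "/foo")
//   == "/foo/system@framework@boot.oat"
// CacheName("/system/framework/core.jar", "/foo")
//   == "/foo/system@framework@core.jar@classes.dex"
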
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 7cd5980..d6c90e1 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -350,6 +350,8 @@
                GetDalvikCacheFilenameOrDie("/system/framework/core.jar", "/foo").c_str());
   EXPECT_STREQ("/foo/system@framework@boot.art",
                GetDalvikCacheFilenameOrDie("/system/framework/boot.art", "/foo").c_str());
+  EXPECT_STREQ("/foo/system@framework@boot.oat",
+               GetDalvikCacheFilenameOrDie("/system/framework/boot.oat", "/foo").c_str());
 }
 
 TEST_F(UtilsTest, GetSystemImageFilename) {
diff --git a/runtime/verifier/method_verifier-inl.h b/runtime/verifier/method_verifier-inl.h
index 62ecf4b..d4fe106 100644
--- a/runtime/verifier/method_verifier-inl.h
+++ b/runtime/verifier/method_verifier-inl.h
@@ -66,9 +66,9 @@
   return !failure_messages_.empty();
 }
 
-inline const RegType& MethodVerifier::ResolveCheckedClass(uint32_t class_idx) {
+inline RegType& MethodVerifier::ResolveCheckedClass(uint32_t class_idx) {
   DCHECK(!HasFailures());
-  const RegType& result = ResolveClassAndCheckAccess(class_idx);
+  RegType& result = ResolveClassAndCheckAccess(class_idx);
   DCHECK(!HasFailures());
   return result;
 }
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 2571cf1..329b4dc 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1175,7 +1175,7 @@
     // If this is a constructor for a class other than java.lang.Object, mark the first ("this")
     // argument as uninitialized. This restricts field access until the superclass constructor is
     // called.
-    const RegType& declaring_class = GetDeclaringClass();
+    RegType& declaring_class = GetDeclaringClass();
     if (IsConstructor() && !declaring_class.IsJavaLangObject()) {
       reg_line->SetRegisterType(arg_start + cur_arg,
                                 reg_types_.UninitializedThisArgument(declaring_class));
@@ -1207,7 +1207,7 @@
         // it's effectively considered initialized the instant we reach here (in the sense that we
         // can return without doing anything or call virtual methods).
         {
-          const RegType& reg_type = ResolveClassAndCheckAccess(iterator.GetTypeIdx());
+          RegType& reg_type = ResolveClassAndCheckAccess(iterator.GetTypeIdx());
           if (!reg_type.IsNonZeroReferenceTypes()) {
             DCHECK(HasFailures());
             return false;
@@ -1241,8 +1241,8 @@
           return false;
         }
 
-        const RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
-        const RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
+        RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
+        RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
         reg_line->SetRegisterTypeWide(arg_start + cur_arg, lo_half, hi_half);
         cur_arg++;
         break;
@@ -1536,7 +1536,7 @@
        * This statement can only appear as the first instruction in an exception handler. We verify
        * that as part of extracting the exception type from the catch block list.
        */
-      const RegType& res_type = GetCaughtExceptionType();
+      RegType& res_type = GetCaughtExceptionType();
       work_line_->SetRegisterType(inst->VRegA_11x(), res_type);
       break;
     }
@@ -1550,7 +1550,7 @@
     case Instruction::RETURN:
       if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
         /* check the method signature */
-        const RegType& return_type = GetMethodReturnType();
+        RegType& return_type = GetMethodReturnType();
         if (!return_type.IsCategory1Types()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected non-category 1 return type "
                                             << return_type;
@@ -1558,7 +1558,7 @@
           // Compilers may generate synthetic functions that write byte values into boolean fields.
           // Also, it may use integer values for boolean, byte, short, and character return types.
           const uint32_t vregA = inst->VRegA_11x();
-          const RegType& src_type = work_line_->GetRegisterType(vregA);
+          RegType& src_type = work_line_->GetRegisterType(vregA);
           bool use_src = ((return_type.IsBoolean() && src_type.IsByte()) ||
                           ((return_type.IsBoolean() || return_type.IsByte() ||
                            return_type.IsShort() || return_type.IsChar()) &&
@@ -1575,7 +1575,7 @@
     case Instruction::RETURN_WIDE:
       if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
         /* check the method signature */
-        const RegType& return_type = GetMethodReturnType();
+        RegType& return_type = GetMethodReturnType();
         if (!return_type.IsCategory2Types()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-wide not expected";
         } else {
@@ -1590,7 +1590,7 @@
       break;
     case Instruction::RETURN_OBJECT:
       if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
-        const RegType& return_type = GetMethodReturnType();
+        RegType& return_type = GetMethodReturnType();
         if (!return_type.IsReferenceTypes()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object not expected";
         } else {
@@ -1598,7 +1598,7 @@
           DCHECK(!return_type.IsZero());
           DCHECK(!return_type.IsUninitializedReference());
           const uint32_t vregA = inst->VRegA_11x();
-          const RegType& reg_type = work_line_->GetRegisterType(vregA);
+          RegType& reg_type = work_line_->GetRegisterType(vregA);
           // Disallow returning uninitialized values and verify that the reference in vAA is an
           // instance of the "return_type"
           if (reg_type.IsUninitializedTypes()) {
@@ -1645,29 +1645,29 @@
       /* could be long or double; resolved upon use */
     case Instruction::CONST_WIDE_16: {
       int64_t val = static_cast<int16_t>(inst->VRegB_21s());
-      const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
-      const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+      RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+      RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
       work_line_->SetRegisterTypeWide(inst->VRegA_21s(), lo, hi);
       break;
     }
     case Instruction::CONST_WIDE_32: {
       int64_t val = static_cast<int32_t>(inst->VRegB_31i());
-      const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
-      const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+      RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+      RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
       work_line_->SetRegisterTypeWide(inst->VRegA_31i(), lo, hi);
       break;
     }
     case Instruction::CONST_WIDE: {
       int64_t val = inst->VRegB_51l();
-      const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
-      const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+      RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+      RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
       work_line_->SetRegisterTypeWide(inst->VRegA_51l(), lo, hi);
       break;
     }
     case Instruction::CONST_WIDE_HIGH16: {
       int64_t val = static_cast<uint64_t>(inst->VRegB_21h()) << 48;
-      const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
-      const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+      RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+      RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
       work_line_->SetRegisterTypeWide(inst->VRegA_21h(), lo, hi);
       break;
     }
@@ -1680,7 +1680,7 @@
     case Instruction::CONST_CLASS: {
       // Get type from instruction if unresolved then we need an access check
       // TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
-      const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+      RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
       // Register holds class, ie its type is class, on error it will hold Conflict.
       work_line_->SetRegisterType(inst->VRegA_21c(),
                                   res_type.IsConflict() ? res_type
@@ -1726,7 +1726,7 @@
        */
       const bool is_checkcast = (inst->Opcode() == Instruction::CHECK_CAST);
       const uint32_t type_idx = (is_checkcast) ? inst->VRegB_21c() : inst->VRegC_22c();
-      const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
+      RegType& res_type = ResolveClassAndCheckAccess(type_idx);
       if (res_type.IsConflict()) {
         // If this is a primitive type, fail HARD.
         mirror::Class* klass = (*dex_cache_)->GetResolvedType(type_idx);
@@ -1745,7 +1745,7 @@
       }
       // TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
       uint32_t orig_type_reg = (is_checkcast) ? inst->VRegA_21c() : inst->VRegB_22c();
-      const RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
+      RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
       if (!res_type.IsNonZeroReferenceTypes()) {
         if (is_checkcast) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on unexpected class " << res_type;
@@ -1768,7 +1768,7 @@
       break;
     }
     case Instruction::ARRAY_LENGTH: {
-      const RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
+      RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
       if (res_type.IsReferenceTypes()) {
         if (!res_type.IsArrayTypes() && !res_type.IsZero()) {  // ie not an array or null
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
@@ -1781,7 +1781,7 @@
       break;
     }
     case Instruction::NEW_INSTANCE: {
-      const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+      RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
       if (res_type.IsConflict()) {
         DCHECK_NE(failures_.size(), 0U);
         break;  // bad class
@@ -1793,7 +1793,7 @@
             << "new-instance on primitive, interface or abstract class" << res_type;
         // Soft failure so carry on to set register type.
       }
-      const RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
+      RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
       // Any registers holding previous allocations from this address that have not yet been
       // initialized must be marked invalid.
       work_line_->MarkUninitRefsAsInvalid(uninit_type);
@@ -1846,7 +1846,7 @@
       work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
       break;
     case Instruction::THROW: {
-      const RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
+      RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
       if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type)) {
         Fail(res_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS : VERIFY_ERROR_BAD_CLASS_SOFT)
             << "thrown class " << res_type << " not instanceof Throwable";
@@ -1867,14 +1867,14 @@
 
     case Instruction::FILL_ARRAY_DATA: {
       /* Similar to the verification done for APUT */
-      const RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
+      RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
       /* array_type can be null if the reg type is Zero */
       if (!array_type.IsZero()) {
         if (!array_type.IsArrayTypes()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type "
                                             << array_type;
         } else {
-          const RegType& component_type = reg_types_.GetComponentType(array_type,
+          RegType& component_type = reg_types_.GetComponentType(array_type,
                                                                       class_loader_->Get());
           DCHECK(!component_type.IsConflict());
           if (component_type.IsNonZeroReferenceTypes()) {
@@ -1902,8 +1902,8 @@
     }
     case Instruction::IF_EQ:
     case Instruction::IF_NE: {
-      const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
-      const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+      RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+      RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
       bool mismatch = false;
       if (reg_type1.IsZero()) {  // zero then integral or reference expected
         mismatch = !reg_type2.IsReferenceTypes() && !reg_type2.IsIntegralTypes();
@@ -1922,8 +1922,8 @@
     case Instruction::IF_GE:
     case Instruction::IF_GT:
     case Instruction::IF_LE: {
-      const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
-      const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+      RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+      RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
       if (!reg_type1.IsIntegralTypes() || !reg_type2.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to 'if' (" << reg_type1 << ","
                                           << reg_type2 << ") must be integral";
@@ -1932,7 +1932,7 @@
     }
     case Instruction::IF_EQZ:
     case Instruction::IF_NEZ: {
-      const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+      RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
       if (!reg_type.IsReferenceTypes() && !reg_type.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
                                           << " unexpected as arg to if-eqz/if-nez";
@@ -1978,8 +1978,8 @@
         // type is assignable to the original then allow optimization. This check is performed to
         // ensure that subsequent merges don't lose type information - such as becoming an
         // interface from a class that would lose information relevant to field checks.
-        const RegType& orig_type = work_line_->GetRegisterType(instance_of_inst->VRegB_22c());
-        const RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
+        RegType& orig_type = work_line_->GetRegisterType(instance_of_inst->VRegB_22c());
+        RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
 
         if (!orig_type.Equals(cast_type) &&
             !cast_type.IsUnresolvedTypes() && !orig_type.IsUnresolvedTypes() &&
@@ -2034,7 +2034,7 @@
     case Instruction::IF_GEZ:
     case Instruction::IF_GTZ:
     case Instruction::IF_LEZ: {
-      const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+      RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
       if (!reg_type.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
                                           << " unexpected as arg to if-ltz/if-gez/if-gtz/if-lez";
@@ -2183,7 +2183,7 @@
                        inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
       mirror::ArtMethod* called_method = VerifyInvocationArgs(inst, METHOD_VIRTUAL, is_range,
                                                               is_super);
-      const RegType* return_type = nullptr;
+      RegType* return_type = nullptr;
       if (called_method != nullptr) {
         Thread* self = Thread::Current();
         StackHandleScope<1> hs(self);
@@ -2239,7 +2239,7 @@
          * allowing the latter only if the "this" argument is the same as the "this" argument to
          * this method (which implies that we're in a constructor ourselves).
          */
-        const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+        RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
         if (this_type.IsConflict())  // failure.
           break;
 
@@ -2250,7 +2250,7 @@
         }
 
         /* must be in same class or in superclass */
-        // const RegType& this_super_klass = this_type.GetSuperClass(&reg_types_);
+        // RegType& this_super_klass = this_type.GetSuperClass(&reg_types_);
         // TODO: re-enable constructor type verification
         // if (this_super_klass.IsConflict()) {
           // Unknown super class, fail so we re-check at runtime.
@@ -2271,7 +2271,7 @@
          */
         work_line_->MarkRefsAsInitialized(this_type);
       }
-      const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(),
+      RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(),
-                                                             return_type_descriptor, false);
+                                                       return_type_descriptor, false);
       if (!return_type.IsLowHalf()) {
         work_line_->SetResultRegisterType(return_type);
@@ -2297,7 +2297,7 @@
         } else {
           descriptor = called_method->GetReturnTypeDescriptor();
         }
-        const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+        RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
-                                                               false);
+                                                         false);
         if (!return_type.IsLowHalf()) {
           work_line_->SetResultRegisterType(return_type);
@@ -2325,7 +2325,7 @@
       /* Get the type of the "this" arg, which should either be a sub-interface of called
        * interface or Object (see comments in RegType::JoinClass).
        */
-      const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+      RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
       if (this_type.IsZero()) {
         /* null pointer always passes (and always fails at runtime) */
       } else {
@@ -2355,7 +2355,7 @@
       } else {
         descriptor = abs_method->GetReturnTypeDescriptor();
       }
-      const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+      RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
-                                                             false);
+                                                       false);
       if (!return_type.IsLowHalf()) {
         work_line_->SetResultRegisterType(return_type);
@@ -2621,7 +2621,7 @@
       mirror::ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
       if (called_method != NULL) {
         const char* descriptor = called_method->GetReturnTypeDescriptor();
-        const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+        RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
-                                                               false);
+                                                         false);
         if (!return_type.IsLowHalf()) {
           work_line_->SetResultRegisterType(return_type);
@@ -2905,11 +2905,11 @@
   return true;
 }  // NOLINT(readability/fn_size)
 
-const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
+RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
   const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
-  const RegType& referrer = GetDeclaringClass();
+  RegType& referrer = GetDeclaringClass();
   mirror::Class* klass = (*dex_cache_)->GetResolvedType(class_idx);
-  const RegType& result =
+  RegType& result =
       klass != NULL ? reg_types_.FromClass(descriptor, klass,
                                            klass->CannotBeAssignedFromOtherTypes())
                     : reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
@@ -2932,8 +2932,8 @@
   return result;
 }
 
-const RegType& MethodVerifier::GetCaughtExceptionType() {
-  const RegType* common_super = NULL;
+RegType& MethodVerifier::GetCaughtExceptionType() {
+  RegType* common_super = NULL;
   if (code_item_->tries_size_ != 0) {
     const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
     uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
@@ -2944,7 +2944,7 @@
           if (iterator.GetHandlerTypeIndex() == DexFile::kDexNoIndex16) {
             common_super = &reg_types_.JavaLangThrowable(false);
           } else {
-            const RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex());
+            RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex());
             if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(exception)) {
               if (exception.IsUnresolvedTypes()) {
                 // We don't know enough about the type. Fail here and let runtime handle it.
@@ -2979,7 +2979,7 @@
 mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_method_idx,
                                                                MethodType method_type) {
   const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
-  const RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
+  RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
   if (klass_type.IsConflict()) {
     std::string append(" in attempt to access method ");
     append += dex_file_->GetMethodName(method_id);
@@ -2990,7 +2990,7 @@
     return NULL;  // Can't resolve Class so no more to do here
   }
   mirror::Class* klass = klass_type.GetClass();
-  const RegType& referrer = GetDeclaringClass();
+  RegType& referrer = GetDeclaringClass();
   mirror::ArtMethod* res_method = (*dex_cache_)->GetResolvedMethod(dex_method_idx);
   if (res_method == NULL) {
     const char* name = dex_file_->GetMethodName(method_id);
@@ -3097,7 +3097,7 @@
    * rigorous check here (which is okay since we have to do it at runtime).
    */
   if (method_type != METHOD_STATIC) {
-    const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+    RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
     if (actual_arg_type.IsConflict()) {  // GetInvocationThis failed.
       CHECK(have_pending_hard_failure_);
       return nullptr;
@@ -3111,14 +3111,14 @@
       } else {
         // Check whether the name of the called method is "<init>"
         const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
-        if (strcmp(dex_file_->GetMethodName(dex_file_->GetMethodId(method_idx)), "init") != 0) {
+        if (strcmp(dex_file_->GetMethodName(dex_file_->GetMethodId(method_idx)), "<init>") != 0) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'this' arg must be initialized";
           return nullptr;
         }
       }
     }
     if (method_type != METHOD_INTERFACE && !actual_arg_type.IsZero()) {
-      const RegType* res_method_class;
+      RegType* res_method_class;
       if (res_method != nullptr) {
         mirror::Class* klass = res_method->GetDeclaringClass();
         res_method_class = &reg_types_.FromClass(klass->GetDescriptor().c_str(), klass,
@@ -3159,12 +3159,12 @@
       return nullptr;
     }
 
-    const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), param_descriptor,
+    RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), param_descriptor,
-                                                        false);
+                                                  false);
     uint32_t get_reg = is_range ? inst->VRegC_3rc() + static_cast<uint32_t>(sig_registers) :
         arg[sig_registers];
     if (reg_type.IsIntegralTypes()) {
-      const RegType& src_type = work_line_->GetRegisterType(get_reg);
+      RegType& src_type = work_line_->GetRegisterType(get_reg);
       if (!src_type.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << get_reg << " has type " << src_type
             << " but expected " << reg_type;
@@ -3247,7 +3247,7 @@
   // has a vtable entry for the target method.
   if (is_super) {
     DCHECK(method_type == METHOD_VIRTUAL);
-    const RegType& super = GetDeclaringClass().GetSuperClass(&reg_types_);
+    RegType& super = GetDeclaringClass().GetSuperClass(&reg_types_);
     if (super.IsUnresolvedTypes()) {
       Fail(VERIFY_ERROR_NO_METHOD) << "unknown super class in invoke-super from "
                                    << PrettyMethod(dex_method_idx_, *dex_file_)
@@ -3275,7 +3275,7 @@
                                                          RegisterLine* reg_line, bool is_range) {
   DCHECK(inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK ||
          inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
-  const RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
+  RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
   if (!actual_arg_type.HasClass()) {
     VLOG(verifier) << "Failed to get mirror::Class* from '" << actual_arg_type << "'";
     return nullptr;
@@ -3313,7 +3313,7 @@
   // We use vAA as our expected arg count, rather than res_method->insSize, because we need to
   // match the call to the signature. Also, we might be calling through an abstract method
   // definition (which doesn't have register count values).
-  const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+  RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
   if (actual_arg_type.IsConflict()) {  // GetInvocationThis failed.
     return NULL;
   }
@@ -3337,7 +3337,7 @@
   }
   if (!actual_arg_type.IsZero()) {
     mirror::Class* klass = res_method->GetDeclaringClass();
-    const RegType& res_method_class =
+    RegType& res_method_class =
         reg_types_.FromClass(klass->GetDescriptor().c_str(), klass,
                              klass->CannotBeAssignedFromOtherTypes());
     if (!res_method_class.IsAssignableFrom(actual_arg_type)) {
@@ -3373,7 +3373,7 @@
                                         << " missing signature component";
       return NULL;
     }
-    const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
+    RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
     uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
     if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
       return res_method;
@@ -3401,7 +3401,7 @@
     DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
     type_idx = inst->VRegB_3rc();
   }
-  const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
+  RegType& res_type = ResolveClassAndCheckAccess(type_idx);
   if (res_type.IsConflict()) {  // bad class
     DCHECK_NE(failures_.size(), 0U);
   } else {
@@ -3412,12 +3412,12 @@
       /* make sure "size" register is valid type */
       work_line_->VerifyRegisterType(inst->VRegB_22c(), reg_types_.Integer());
       /* set register type to array class */
-      const RegType& precise_type = reg_types_.FromUninitialized(res_type);
+      RegType& precise_type = reg_types_.FromUninitialized(res_type);
       work_line_->SetRegisterType(inst->VRegA_22c(), precise_type);
     } else {
       // Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
       // the list and fail. It's legal, if silly, for arg_count to be zero.
-      const RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->Get());
+      RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->Get());
       uint32_t arg_count = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
       uint32_t arg[5];
       if (!is_range) {
@@ -3431,19 +3431,19 @@
         }
       }
       // filled-array result goes into "result" register
-      const RegType& precise_type = reg_types_.FromUninitialized(res_type);
+      RegType& precise_type = reg_types_.FromUninitialized(res_type);
       work_line_->SetResultRegisterType(precise_type);
     }
   }
 }
 
 void MethodVerifier::VerifyAGet(const Instruction* inst,
-                                const RegType& insn_type, bool is_primitive) {
-  const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+                                RegType& insn_type, bool is_primitive) {
+  RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
   if (!index_type.IsArrayIndexTypes()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
   } else {
-    const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+    RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
     if (array_type.IsZero()) {
       // Null array class; this code path will fail at runtime. Infer a merge-able type from the
       // instruction type. TODO: have a proper notion of bottom here.
@@ -3459,7 +3459,7 @@
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aget";
     } else {
       /* verify the class */
-      const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
+      RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
       if (!component_type.IsReferenceTypes() && !is_primitive) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
             << " source for aget-object";
@@ -3486,12 +3486,12 @@
   }
 }
 
-void MethodVerifier::VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
+void MethodVerifier::VerifyPrimitivePut(RegType& target_type, RegType& insn_type,
                                         const uint32_t vregA) {
   // Primitive assignability rules are weaker than regular assignability rules.
   bool instruction_compatible;
   bool value_compatible;
-  const RegType& value_type = work_line_->GetRegisterType(vregA);
+  RegType& value_type = work_line_->GetRegisterType(vregA);
   if (target_type.IsIntegralTypes()) {
     instruction_compatible = target_type.Equals(insn_type);
     value_compatible = value_type.IsIntegralTypes();
@@ -3500,11 +3500,11 @@
     value_compatible = value_type.IsFloatTypes();
   } else if (target_type.IsLong()) {
     instruction_compatible = insn_type.IsLong();
-    const RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+    RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
     value_compatible = value_type.IsLongTypes() && value_type.CheckWidePair(value_type_hi);
   } else if (target_type.IsDouble()) {
     instruction_compatible = insn_type.IsLong();  // no put-double, so expect put-long
-    const RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+    RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
     value_compatible = value_type.IsDoubleTypes() && value_type.CheckWidePair(value_type_hi);
   } else {
     instruction_compatible = false;  // reference with primitive store
@@ -3526,19 +3526,19 @@
 }
 
 void MethodVerifier::VerifyAPut(const Instruction* inst,
-                                const RegType& insn_type, bool is_primitive) {
-  const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+                                RegType& insn_type, bool is_primitive) {
+  RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
   if (!index_type.IsArrayIndexTypes()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
   } else {
-    const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+    RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
     if (array_type.IsZero()) {
       // Null array type; this code path will fail at runtime. Infer a merge-able type from the
       // instruction type.
     } else if (!array_type.IsArrayTypes()) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
     } else {
-      const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
+      RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
       const uint32_t vregA = inst->VRegA_23x();
       if (is_primitive) {
         VerifyPrimitivePut(component_type, insn_type, vregA);
@@ -3560,7 +3560,7 @@
 mirror::ArtField* MethodVerifier::GetStaticField(int field_idx) {
   const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
   // Check access to class
-  const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
+  RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
   if (klass_type.IsConflict()) {  // bad class
     AppendToLastFailMessage(StringPrintf(" in attempt to access static field %d (%s) in %s",
                                          field_idx, dex_file_->GetFieldName(field_id),
@@ -3592,10 +3592,10 @@
   return field;
 }
 
-mirror::ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) {
+mirror::ArtField* MethodVerifier::GetInstanceField(RegType& obj_type, int field_idx) {
   const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
   // Check access to class
-  const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
+  RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
   if (klass_type.IsConflict()) {
     AppendToLastFailMessage(StringPrintf(" in attempt to access instance field %d (%s) in %s",
                                          field_idx, dex_file_->GetFieldName(field_id),
@@ -3629,7 +3629,7 @@
     return field;
   } else {
     mirror::Class* klass = field->GetDeclaringClass();
-    const RegType& field_klass =
+    RegType& field_klass =
         reg_types_.FromClass(dex_file_->GetFieldDeclaringClassDescriptor(field_id),
                              klass, klass->CannotBeAssignedFromOtherTypes());
     if (obj_type.IsUninitializedTypes() &&
@@ -3654,17 +3654,17 @@
   }
 }
 
-void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyISGet(const Instruction* inst, RegType& insn_type,
                                  bool is_primitive, bool is_static) {
   uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
   mirror::ArtField* field;
   if (is_static) {
     field = GetStaticField(field_idx);
   } else {
-    const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+    RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
     field = GetInstanceField(object_type, field_idx);
   }
-  const RegType* field_type = nullptr;
+  RegType* field_type = nullptr;
   if (field != NULL) {
     Thread* self = Thread::Current();
     mirror::Class* field_type_class;
@@ -3720,17 +3720,17 @@
   }
 }
 
-void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyISPut(const Instruction* inst, RegType& insn_type,
                                  bool is_primitive, bool is_static) {
   uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
   mirror::ArtField* field;
   if (is_static) {
     field = GetStaticField(field_idx);
   } else {
-    const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+    RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
     field = GetInstanceField(object_type, field_idx);
   }
-  const RegType* field_type = nullptr;
+  RegType* field_type = nullptr;
   if (field != NULL) {
     if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
       Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
@@ -3782,7 +3782,7 @@
          inst->Opcode() == Instruction::IPUT_QUICK ||
          inst->Opcode() == Instruction::IPUT_WIDE_QUICK ||
          inst->Opcode() == Instruction::IPUT_OBJECT_QUICK);
-  const RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
+  RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
   if (!object_type.HasClass()) {
     VLOG(verifier) << "Failed to get mirror::Class* from '" << object_type << "'";
     return nullptr;
@@ -3797,7 +3797,7 @@
   return f;
 }
 
-void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyIGetQuick(const Instruction* inst, RegType& insn_type,
                                      bool is_primitive) {
   DCHECK(Runtime::Current()->IsStarted());
   mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
@@ -3812,7 +3812,7 @@
     FieldHelper fh(h_field);
     field_type_class = fh.GetType(can_load_classes_);
   }
-  const RegType* field_type;
+  RegType* field_type;
   if (field_type_class != nullptr) {
     field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
                                        field_type_class->CannotBeAssignedFromOtherTypes());
@@ -3857,7 +3857,7 @@
   }
 }
 
-void MethodVerifier::VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyIPutQuick(const Instruction* inst, RegType& insn_type,
                                      bool is_primitive) {
   DCHECK(Runtime::Current()->IsStarted());
   mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
@@ -3867,7 +3867,7 @@
   }
   const char* descriptor = field->GetTypeDescriptor();
   mirror::ClassLoader* loader = field->GetDeclaringClass()->GetClassLoader();
-  const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
+  RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
   if (field != NULL) {
     if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
       Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
@@ -3880,7 +3880,7 @@
     // Primitive field assignability rules are weaker than regular assignability rules
     bool instruction_compatible;
     bool value_compatible;
-    const RegType& value_type = work_line_->GetRegisterType(vregA);
+    RegType& value_type = work_line_->GetRegisterType(vregA);
     if (field_type.IsIntegralTypes()) {
       instruction_compatible = insn_type.IsIntegralTypes();
       value_compatible = value_type.IsIntegralTypes();
@@ -3998,7 +3998,7 @@
   return &insn_flags_[work_insn_idx_];
 }
 
-const RegType& MethodVerifier::GetMethodReturnType() {
+RegType& MethodVerifier::GetMethodReturnType() {
   if (return_type_ == nullptr) {
     if (mirror_method_ != NULL) {
       Thread* self = Thread::Current();
@@ -4028,7 +4028,7 @@
   return *return_type_;
 }
 
-const RegType& MethodVerifier::GetDeclaringClass() {
+RegType& MethodVerifier::GetDeclaringClass() {
   if (declaring_class_ == NULL) {
     const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
     const char* descriptor
@@ -4049,7 +4049,7 @@
   DCHECK(line != nullptr) << "No register line at DEX pc " << StringPrintf("0x%x", dex_pc);
   std::vector<int32_t> result;
   for (size_t i = 0; i < line->NumRegs(); ++i) {
-    const RegType& type = line->GetRegisterType(i);
+    RegType& type = line->GetRegisterType(i);
     if (type.IsConstant()) {
       result.push_back(type.IsPreciseConstant() ? kConstant : kImpreciseConstant);
       result.push_back(type.ConstantValue());
@@ -4089,7 +4089,7 @@
   return result;
 }
 
-const RegType& MethodVerifier::DetermineCat1Constant(int32_t value, bool precise) {
+RegType& MethodVerifier::DetermineCat1Constant(int32_t value, bool precise) {
   if (precise) {
     // Precise constant type.
     return reg_types_.FromCat1Const(value, true);
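
The const stripping throughout method_verifier.cc is mechanical fallout from RegType caching its class through a GC root instead of a raw mirror::Class* (see the reg_type.h hunks below): once the cached pointer may be rewritten by a moving collector, the accessors lose their const qualifiers, and that propagates through every RegType& the verifier passes around. Below is a minimal sketch of that effect, with GcRootSketch as a hypothetical stand-in for ART's GcRoot; Read() is deliberately non-const because a real read barrier may update the stored pointer in place.

#include <cassert>
#include <cstdio>

template <typename T>
class GcRootSketch {
 public:
  explicit GcRootSketch(T* ref = nullptr) : ref_(ref) {}

  // Non-const: a real read barrier could rewrite ref_ to the object's
  // to-space copy, so even logically read-only callers need mutable access.
  T* Read() {
    return ref_;
  }

  bool IsNull() const { return ref_ == nullptr; }

 private:
  T* ref_;  // Updated in place by the (hypothetical) barrier.
};

struct Class { const char* name; };

int main() {
  Class string_class{"java.lang.String"};
  GcRootSketch<Class> root(&string_class);
  assert(!root.IsNull());
  std::printf("%s\n", root.Read()->name);  // Access funneled through Read().
  return 0;
}

Under this model a const RegType& could no longer reach its class, which is why the references above become plain RegType&.
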
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 757c419..e63a90c 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -230,7 +230,7 @@
   bool HasCheckCasts() const;
   bool HasVirtualOrInterfaceInvokes() const;
   bool HasFailures() const;
-  const RegType& ResolveCheckedClass(uint32_t class_idx)
+  RegType& ResolveCheckedClass(uint32_t class_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
@@ -471,34 +471,34 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Helper to perform verification on puts of primitive type.
-  void VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
+  void VerifyPrimitivePut(RegType& target_type, RegType& insn_type,
                           const uint32_t vregA) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Perform verification of an aget instruction. The destination register's type will be set to
   // be that of component type of the array unless the array type is unknown, in which case a
   // bottom type inferred from the type of instruction is used. is_primitive is false for an
   // aget-object.
-  void VerifyAGet(const Instruction* inst, const RegType& insn_type,
+  void VerifyAGet(const Instruction* inst, RegType& insn_type,
                   bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Perform verification of an aput instruction.
-  void VerifyAPut(const Instruction* inst, const RegType& insn_type,
+  void VerifyAPut(const Instruction* inst, RegType& insn_type,
                   bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Lookup instance field and fail for resolution violations
-  mirror::ArtField* GetInstanceField(const RegType& obj_type, int field_idx)
+  mirror::ArtField* GetInstanceField(RegType& obj_type, int field_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Lookup static field and fail for resolution violations
   mirror::ArtField* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Perform verification of an iget or sget instruction.
-  void VerifyISGet(const Instruction* inst, const RegType& insn_type,
+  void VerifyISGet(const Instruction* inst, RegType& insn_type,
                    bool is_primitive, bool is_static)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Perform verification of an iput or sput instruction.
-  void VerifyISPut(const Instruction* inst, const RegType& insn_type,
+  void VerifyISPut(const Instruction* inst, RegType& insn_type,
                    bool is_primitive, bool is_static)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -508,18 +508,18 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Perform verification of an iget-quick instruction.
-  void VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
+  void VerifyIGetQuick(const Instruction* inst, RegType& insn_type,
                        bool is_primitive)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Perform verification of an iput-quick instruction.
-  void VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
+  void VerifyIPutQuick(const Instruction* inst, RegType& insn_type,
                        bool is_primitive)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Resolves a class based on an index and performs access checks to ensure the referrer can
   // access the resolved class.
-  const RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
+  RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -527,7 +527,7 @@
    * address, determine the Join of all exceptions that can land here. Fails if no matching
    * exception handler can be found or if the Join of exception types fails.
    */
-  const RegType& GetCaughtExceptionType()
+  RegType& GetCaughtExceptionType()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -613,14 +613,14 @@
   }
 
   // Return the register type for the method.
-  const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get a type representing the declaring class of the method.
-  const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   InstructionFlags* CurrentInsnFlags();
 
-  const RegType& DetermineCat1Constant(int32_t value, bool precise)
+  RegType& DetermineCat1Constant(int32_t value, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   RegTypeCache reg_types_;
@@ -641,7 +641,7 @@
   // Its object representation if known.
   mirror::ArtMethod* mirror_method_ GUARDED_BY(Locks::mutator_lock_);
   const uint32_t method_access_flags_;  // Method's access flags.
-  const RegType* return_type_;  // Lazily computed return type of the method.
+  RegType* return_type_;  // Lazily computed return type of the method.
   const DexFile* const dex_file_;  // The dex file containing the method.
   // The dex_cache for the declaring class of the method.
   Handle<mirror::DexCache>* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
@@ -649,7 +649,7 @@
   Handle<mirror::ClassLoader>* class_loader_ GUARDED_BY(Locks::mutator_lock_);
   const DexFile::ClassDef* const class_def_;  // The class def of the declaring class of the method.
   const DexFile::CodeItem* const code_item_;  // The code item containing the code for the method.
-  const RegType* declaring_class_;  // Lazily computed reg type of the method's declaring class.
+  RegType* declaring_class_;  // Lazily computed reg type of the method's declaring class.
   // Instruction widths and flags, one entry per code unit.
   std::unique_ptr<InstructionFlags[]> insn_flags_;
   // The dex PC of a FindLocksAtDexPc request, -1 otherwise.
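
The return_type_ and declaring_class_ members above keep their lazily-computed shape; only the const is gone, so they can point at mutable cache entries. A self-contained sketch of that compute-once pattern, with VerifierSketch and its string-based "resolution" as illustrative stand-ins:

#include <cstdio>
#include <string>
#include <utility>

class VerifierSketch {
 public:
  explicit VerifierSketch(std::string descriptor)
      : descriptor_(std::move(descriptor)) {}

  // First call fills the cache slot; later calls reuse it.
  std::string& GetMethodReturnType() {
    if (return_type_ == nullptr) {
      cached_ = "resolved:" + descriptor_;  // Expensive resolution goes here.
      return_type_ = &cached_;
    }
    return *return_type_;
  }

 private:
  std::string descriptor_;
  std::string cached_;
  std::string* return_type_ = nullptr;  // Lazily computed, as in the header.
};

int main() {
  VerifierSketch v("Ljava/lang/Object;");
  std::printf("%s\n", v.GetMethodReturnType().c_str());
  return 0;
}
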
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index f0729e4..6422cdf 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -81,7 +81,7 @@
     : PrimitiveType(klass, descriptor, cache_id) {
 }
 
-std::string PreciseConstType::Dump() const {
+std::string PreciseConstType::Dump() {
   std::stringstream result;
   uint32_t val = ConstantValue();
   if (val == 0) {
@@ -98,47 +98,47 @@
   return result.str();
 }
 
-std::string BooleanType::Dump() const {
+std::string BooleanType::Dump() {
   return "Boolean";
 }
 
-std::string ConflictType::Dump() const {
+std::string ConflictType::Dump() {
     return "Conflict";
 }
 
-std::string ByteType::Dump() const {
+std::string ByteType::Dump() {
   return "Byte";
 }
 
-std::string ShortType::Dump() const {
+std::string ShortType::Dump() {
   return "Short";
 }
 
-std::string CharType::Dump() const {
+std::string CharType::Dump() {
   return "Char";
 }
 
-std::string FloatType::Dump() const {
+std::string FloatType::Dump() {
   return "Float";
 }
 
-std::string LongLoType::Dump() const {
+std::string LongLoType::Dump() {
   return "Long (Low Half)";
 }
 
-std::string LongHiType::Dump() const {
+std::string LongHiType::Dump() {
   return "Long (High Half)";
 }
 
-std::string DoubleLoType::Dump() const {
+std::string DoubleLoType::Dump() {
   return "Double (Low Half)";
 }
 
-std::string DoubleHiType::Dump() const {
+std::string DoubleHiType::Dump() {
   return "Double (High Half)";
 }
 
-std::string IntegerType::Dump() const {
+std::string IntegerType::Dump() {
     return "Integer";
 }
 
@@ -361,7 +361,7 @@
   }
 }
 
-std::string UndefinedType::Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+std::string UndefinedType::Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return "Undefined";
 }
 
@@ -391,7 +391,7 @@
   DCHECK(klass->IsInstantiable());
 }
 
-std::string UnresolvedMergedType::Dump() const {
+std::string UnresolvedMergedType::Dump() {
   std::stringstream result;
   std::set<uint16_t> types = GetMergedTypes();
   result << "UnresolvedMergedReferences(";
@@ -405,59 +405,59 @@
   return result.str();
 }
 
-std::string UnresolvedSuperClass::Dump() const {
+std::string UnresolvedSuperClass::Dump() {
   std::stringstream result;
   uint16_t super_type_id = GetUnresolvedSuperClassChildId();
   result << "UnresolvedSuperClass(" << reg_type_cache_->GetFromId(super_type_id).Dump() << ")";
   return result.str();
 }
 
-std::string UnresolvedReferenceType::Dump() const {
+std::string UnresolvedReferenceType::Dump() {
   std::stringstream result;
   result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor());
   return result.str();
 }
 
-std::string UnresolvedUninitializedRefType::Dump() const {
+std::string UnresolvedUninitializedRefType::Dump() {
   std::stringstream result;
   result << "Unresolved And Uninitialized Reference" << ": " << PrettyDescriptor(GetDescriptor());
   result << " Allocation PC: " << GetAllocationPc();
   return result.str();
 }
 
-std::string UnresolvedUninitializedThisRefType::Dump() const {
+std::string UnresolvedUninitializedThisRefType::Dump() {
   std::stringstream result;
   result << "Unresolved And Uninitialized This Reference" << PrettyDescriptor(GetDescriptor());
   return result.str();
 }
 
-std::string ReferenceType::Dump() const {
+std::string ReferenceType::Dump() {
   std::stringstream result;
   result << "Reference" << ": " << PrettyDescriptor(GetClass());
   return result.str();
 }
 
-std::string PreciseReferenceType::Dump() const {
+std::string PreciseReferenceType::Dump() {
   std::stringstream result;
   result << "Precise Reference" << ": "<< PrettyDescriptor(GetClass());
   return result.str();
 }
 
-std::string UninitializedReferenceType::Dump() const {
+std::string UninitializedReferenceType::Dump() {
   std::stringstream result;
   result << "Uninitialized Reference" << ": " << PrettyDescriptor(GetClass());
   result << " Allocation PC: " << GetAllocationPc();
   return result.str();
 }
 
-std::string UninitializedThisReferenceType::Dump() const {
+std::string UninitializedThisReferenceType::Dump() {
   std::stringstream result;
   result << "Uninitialized This Reference" << ": " << PrettyDescriptor(GetClass());
   result << "Allocation PC: " << GetAllocationPc();
   return result.str();
 }
 
-std::string ImpreciseConstType::Dump() const {
+std::string ImpreciseConstType::Dump() {
   std::stringstream result;
   uint32_t val = ConstantValue();
   if (val == 0) {
@@ -472,7 +472,7 @@
   }
   return result.str();
 }
-std::string PreciseConstLoType::Dump() const {
+std::string PreciseConstLoType::Dump() {
   std::stringstream result;
 
   int32_t val = ConstantValueLo();
@@ -486,7 +486,7 @@
   return result.str();
 }
 
-std::string ImpreciseConstLoType::Dump() const {
+std::string ImpreciseConstLoType::Dump() {
   std::stringstream result;
 
   int32_t val = ConstantValueLo();
@@ -500,7 +500,7 @@
   return result.str();
 }
 
-std::string PreciseConstHiType::Dump() const {
+std::string PreciseConstHiType::Dump() {
   std::stringstream result;
   int32_t val = ConstantValueHi();
   result << "Precise ";
@@ -513,7 +513,7 @@
   return result.str();
 }
 
-std::string ImpreciseConstHiType::Dump() const {
+std::string ImpreciseConstHiType::Dump() {
   std::stringstream result;
   int32_t val = ConstantValueHi();
   result << "Imprecise ";
@@ -530,7 +530,7 @@
     : RegType(NULL, "", cache_id), constant_(constant) {
 }
 
-const RegType& UndefinedType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+RegType& UndefinedType::Merge(RegType& incoming_type, RegTypeCache* reg_types)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (incoming_type.IsUndefined()) {
     return *this;  // Undefined MERGE Undefined => Undefined
@@ -538,7 +538,7 @@
   return reg_types->Conflict();
 }
 
-const RegType& RegType::HighHalf(RegTypeCache* cache) const {
+RegType& RegType::HighHalf(RegTypeCache* cache) const {
   DCHECK(IsLowHalf());
   if (IsLongLo()) {
     return cache->LongHi();
@@ -586,12 +586,10 @@
 }
 std::set<uint16_t> UnresolvedMergedType::GetMergedTypes() const {
   std::pair<uint16_t, uint16_t> refs = GetTopMergedTypes();
-  const RegType& _left(reg_type_cache_->GetFromId(refs.first));
-  RegType& __left(const_cast<RegType&>(_left));
-  UnresolvedMergedType* left = down_cast<UnresolvedMergedType*>(&__left);
+  RegType& _left(reg_type_cache_->GetFromId(refs.first));
+  UnresolvedMergedType* left = down_cast<UnresolvedMergedType*>(&_left);
 
-  RegType& _right(
-      const_cast<RegType&>(reg_type_cache_->GetFromId(refs.second)));
+  RegType& _right(reg_type_cache_->GetFromId(refs.second));
   UnresolvedMergedType* right = down_cast<UnresolvedMergedType*>(&_right);
 
   std::set<uint16_t> types;
@@ -614,7 +612,7 @@
   return types;
 }
 
-const RegType& RegType::GetSuperClass(RegTypeCache* cache) const {
+RegType& RegType::GetSuperClass(RegTypeCache* cache) {
   if (!IsUnresolvedTypes()) {
     mirror::Class* super_klass = GetClass()->GetSuperClass();
     if (super_klass != NULL) {
@@ -635,7 +633,7 @@
   }
 }
 
-bool RegType::CanAccess(const RegType& other) const {
+bool RegType::CanAccess(RegType& other) {
   if (Equals(other)) {
     return true;  // Trivial accessibility.
   } else {
@@ -651,7 +649,7 @@
   }
 }
 
-bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) const {
+bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) {
   if ((access_flags & kAccPublic) != 0) {
     return true;
   }
@@ -662,7 +660,7 @@
   }
 }
 
-bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsObjectArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
     // Primitive arrays will always resolve
     DCHECK(descriptor_[1] == 'L' || descriptor_[1] == '[');
@@ -675,11 +673,11 @@
   }
 }
 
-bool RegType::IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsJavaLangObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   return IsReference() && GetClass()->IsObjectClass();
 }
 
-bool RegType::IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
     return descriptor_[0] == '[';
   } else if (HasClass()) {
@@ -689,7 +687,7 @@
   }
 }
 
-bool RegType::IsJavaLangObjectArray() const {
+bool RegType::IsJavaLangObjectArray() {
   if (HasClass()) {
     mirror::Class* type = GetClass();
     return type->IsArrayClass() && type->GetComponentType()->IsObjectClass();
@@ -697,7 +695,7 @@
   return false;
 }
 
-bool RegType::IsInstantiableTypes() const {
+bool RegType::IsInstantiableTypes() {
   return IsUnresolvedTypes() || (IsNonZeroReferenceTypes() && GetClass()->IsInstantiable());
 }
 
@@ -705,7 +703,7 @@
   : ConstantType(constat, cache_id) {
 }
 
-static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
+static bool AssignableFrom(RegType& lhs, RegType& rhs, bool strict)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (lhs.Equals(rhs)) {
     return true;
@@ -753,11 +751,11 @@
   }
 }
 
-bool RegType::IsAssignableFrom(const RegType& src) const {
+bool RegType::IsAssignableFrom(RegType& src) {
   return AssignableFrom(*this, src, false);
 }
 
-bool RegType::IsStrictlyAssignableFrom(const RegType& src) const {
+bool RegType::IsStrictlyAssignableFrom(RegType& src) {
   return AssignableFrom(*this, src, true);
 }
 
@@ -775,11 +773,11 @@
   }
 }
 
-static const RegType& SelectNonConstant(const RegType& a, const RegType& b) {
+static RegType& SelectNonConstant(RegType& a, RegType& b) {
   return a.IsConstantTypes() ? b : a;
 }
 
-const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const {
+RegType& RegType::Merge(RegType& incoming_type, RegTypeCache* reg_types) {
   DCHECK(!Equals(incoming_type));  // Trivial equality handled by caller
   if (IsConflict()) {
     return *this;  // Conflict MERGE * => Conflict
@@ -958,16 +956,16 @@
 void RegType::CheckInvariants() const {
   if (IsConstant() || IsConstantLo() || IsConstantHi()) {
     CHECK(descriptor_.empty()) << *this;
-    CHECK(klass_ == NULL) << *this;
+    CHECK(klass_.IsNull()) << *this;
   }
-  if (klass_ != NULL) {
+  if (!klass_.IsNull()) {
     CHECK(!descriptor_.empty()) << *this;
   }
 }
 
 void RegType::VisitRoots(RootCallback* callback, void* arg) {
-  if (klass_ != nullptr) {
-    callback(reinterpret_cast<mirror::Object**>(&klass_), arg, 0, kRootUnknown);
+  if (!klass_.IsNull()) {
+    klass_.VisitRoot(callback, arg, 0, kRootUnknown);
   }
 }
 
@@ -978,36 +976,37 @@
 void UnresolvedUninitializedThisRefType::CheckInvariants() const {
   CHECK_EQ(GetAllocationPc(), 0U) << *this;
   CHECK(!descriptor_.empty()) << *this;
-  CHECK(klass_ == NULL) << *this;
+  CHECK(klass_.IsNull()) << *this;
 }
 
 void UnresolvedUninitializedRefType::CheckInvariants() const {
   CHECK(!descriptor_.empty()) << *this;
-  CHECK(klass_ == NULL) << *this;
+  CHECK(klass_.IsNull()) << *this;
 }
 
 void UnresolvedMergedType::CheckInvariants() const {
   // Unresolved merged types: merged types should be defined.
   CHECK(descriptor_.empty()) << *this;
-  CHECK(klass_ == NULL) << *this;
+  CHECK(klass_.IsNull()) << *this;
   CHECK_NE(merged_types_.first, 0U) << *this;
   CHECK_NE(merged_types_.second, 0U) << *this;
 }
 
 void UnresolvedReferenceType::CheckInvariants() const {
   CHECK(!descriptor_.empty()) << *this;
-  CHECK(klass_ == NULL) << *this;
+  CHECK(klass_.IsNull()) << *this;
 }
 
 void UnresolvedSuperClass::CheckInvariants() const {
   // Unresolved merged types: merged types should be defined.
   CHECK(descriptor_.empty()) << *this;
-  CHECK(klass_ == NULL) << *this;
+  CHECK(klass_.IsNull()) << *this;
   CHECK_NE(unresolved_child_id_, 0U) << *this;
 }
 
 std::ostream& operator<<(std::ostream& os, const RegType& rhs) {
-  os << rhs.Dump();
+  RegType& rhs_non_const = const_cast<RegType&>(rhs);
+  os << rhs_non_const.Dump();
   return os;
 }
 
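The VisitRoots change is the functional heart of this file: instead of exposing a raw mirror::Object** slot, the type asks its GcRoot to report itself, and the callback may relocate whatever it points at. A toy model of that contract follows; RootHolder, Object, and the callback signature are illustrative, not ART's.

#include <cstdio>

struct Object { int data; };
using RootCallback = void (*)(Object** root, void* arg);

struct RootHolder {
  Object* obj = nullptr;

  void VisitRoots(RootCallback cb, void* arg) {
    if (obj != nullptr) {
      cb(&obj, arg);  // The callback may rewrite obj to a moved copy.
    }
  }
};

int main() {
  Object from_space{41};
  Object to_space{42};
  RootHolder holder{&from_space};
  // A toy "moving collector": redirect any visited root to the new copy.
  RootCallback move = [](Object** root, void* arg) {
    *root = static_cast<Object*>(arg);
  };
  holder.VisitRoots(move, &to_space);
  std::printf("%d\n", holder.obj->data);  // Prints 42: the root was updated.
  return 0;
}
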
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index e985f3a..d508fb5 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -25,6 +25,7 @@
 #include "jni.h"
 
 #include "base/macros.h"
+#include "gc_root.h"
 #include "globals.h"
 #include "object_callbacks.h"
 #include "primitive.h"
@@ -107,7 +108,7 @@
     return IsLowHalf();
   }
   // Check this is the low half, and that type_h is its matching high-half.
-  inline bool CheckWidePair(const RegType& type_h) const {
+  inline bool CheckWidePair(RegType& type_h) const {
     if (IsLowHalf()) {
       return ((IsPreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
               (IsPreciseConstantLo() && type_h.IsImpreciseConstantHi()) ||
@@ -119,7 +120,7 @@
     return false;
   }
   // The high half that corresponds to this low half
-  const RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool IsConstantBoolean() const {
     return IsConstant() && (ConstantValue() >= 0) && (ConstantValue() <= 1);
@@ -198,55 +199,54 @@
   virtual bool HasClass() const {
     return false;
   }
-  bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsJavaLangObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsObjectArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   Primitive::Type GetPrimitiveType() const;
-  bool IsJavaLangObjectArray() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsInstantiableTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsJavaLangObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsInstantiableTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const std::string& GetDescriptor() const {
     DCHECK(HasClass() || (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
                           !IsUnresolvedSuperClass()));
     return descriptor_;
   }
-  mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(!IsUnresolvedReference());
-    DCHECK(klass_ != NULL) << Dump();
+    DCHECK(!klass_.IsNull()) << Dump();
     DCHECK(HasClass());
-    return klass_;
+    return klass_.Read();
   }
   uint16_t GetId() const {
     return cache_id_;
   }
-  const RegType& GetSuperClass(RegTypeCache* cache) const
+  RegType& GetSuperClass(RegTypeCache* cache)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  virtual std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+  virtual std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
 
   // Can this type access other?
-  bool CanAccess(const RegType& other) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool CanAccess(RegType& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Can this type access a member with the given properties?
-  bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
+  bool CanAccessMember(mirror::Class* klass, uint32_t access_flags)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Can this type be assigned by src?
   // Note: Object and interface types may always be assigned to one another, see comment on
   // ClassJoin.
-  bool IsAssignableFrom(const RegType& src) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsAssignableFrom(RegType& src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Can this type be assigned by src? Variant of IsAssignableFrom that doesn't allow assignment to
   // an interface from an Object.
-  bool IsStrictlyAssignableFrom(const RegType& src) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsStrictlyAssignableFrom(RegType& src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Are these RegTypes the same?
-  bool Equals(const RegType& other) const {
+  bool Equals(RegType& other) const {
     return GetId() == other.GetId();
   }
 
   // Compute the merge of this register from one edge (path) with incoming_type from another.
-  virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+  virtual RegType& Merge(RegType& incoming_type, RegTypeCache* reg_types)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -275,7 +275,7 @@
  protected:
   RegType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
+      : descriptor_(descriptor), klass_(GcRoot<mirror::Class>(klass)), cache_id_(cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
     }
@@ -285,7 +285,7 @@
 
 
   const std::string descriptor_;
-  mirror::Class* klass_;  // Non-const only due to moving classes.
+  GcRoot<mirror::Class> klass_;
   const uint16_t cache_id_;
 
   friend class RegTypeCache;
@@ -301,7 +301,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get the singleton Conflict instance.
   static ConflictType* GetInstance();
@@ -331,7 +331,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get the singleton Undefined instance.
   static UndefinedType* GetInstance();
@@ -350,7 +350,7 @@
       : RegType(klass, descriptor, cache_id) {
   }
 
-  virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+  virtual RegType& Merge(RegType& incoming_type, RegTypeCache* reg_types)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static UndefinedType* instance_;
@@ -373,7 +373,7 @@
   bool IsInteger() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static IntegerType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
                                      uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -392,7 +392,7 @@
   bool IsBoolean() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static BooleanType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
                                      uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -412,7 +412,7 @@
   bool IsByte() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static ByteType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
                                   uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -431,7 +431,7 @@
   bool IsShort() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static ShortType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
                                    uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -450,7 +450,7 @@
   bool IsChar() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static CharType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
                                   uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -469,7 +469,7 @@
   bool IsFloat() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static FloatType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
                                    uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -491,7 +491,7 @@
 
 class LongLoType : public Cat2Type {
  public:
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsLongLo() const {
     return true;
   }
@@ -513,7 +513,7 @@
 
 class LongHiType : public Cat2Type {
  public:
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsLongHi() const {
     return true;
   }
@@ -532,7 +532,7 @@
 
 class DoubleLoType : public Cat2Type {
  public:
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsDoubleLo() const {
     return true;
   }
@@ -554,7 +554,7 @@
 
 class DoubleHiType : public Cat2Type {
  public:
-  std::string Dump() const;
+  std::string Dump();
   virtual bool IsDoubleHi() const {
     return true;
   }
@@ -621,7 +621,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 class PreciseConstLoType : public ConstantType {
@@ -633,7 +633,7 @@
   bool IsPreciseConstantLo() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 class PreciseConstHiType : public ConstantType {
@@ -645,7 +645,7 @@
   bool IsPreciseConstantHi() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 class ImpreciseConstType : public ConstantType {
@@ -655,7 +655,7 @@
   bool IsImpreciseConstant() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 class ImpreciseConstLoType : public ConstantType {
@@ -666,7 +666,7 @@
   bool IsImpreciseConstantLo() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 class ImpreciseConstHiType : public ConstantType {
@@ -677,7 +677,7 @@
   bool IsImpreciseConstantHi() const {
     return true;
   }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 // Common parent of all uninitialized types. Uninitialized types are created by "new" dex
@@ -718,7 +718,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 // Similar to UnresolvedReferenceType but not yet having been passed to a constructor.
@@ -737,7 +737,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
@@ -762,7 +762,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -782,7 +782,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
@@ -807,7 +807,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 // A type of register holding a reference to an Object of type GetClass and only an object of that
@@ -829,7 +829,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 // Common parent of unresolved types.
@@ -857,7 +857,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
@@ -883,7 +883,7 @@
     return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -918,7 +918,7 @@
     return true;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
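
Note: dropping const from the Dump() overrides above is not cosmetic. As the rest of this patch shows (klass_.Read(), klass_.IsNull(), the new VisitRoots hook), the cached class reference is now held through a GC root, and reading such a root can write the healed pointer back into the holder, so even logically read-only accessors can no longer be declared const. A minimal sketch of that mechanism, assuming a hypothetical Root wrapper (not ART's actual GcRoot):

    #include <atomic>

    template <typename T>
    class Root {
     public:
      explicit Root(T* ref) : ref_(ref) {}
      // Read() may heal the stored reference to the object's new location,
      // which is why accessors that touch it lose their const qualifier.
      T* Read() {
        T* ref = ref_.load(std::memory_order_relaxed);
        T* healed = ReadBarrier(ref);  // stand-in for the real barrier
        if (healed != ref) {
          ref_.store(healed, std::memory_order_relaxed);  // fix up the root
        }
        return healed;
      }

     private:
      static T* ReadBarrier(T* ref) { return ref; }  // no-op placeholder
      std::atomic<T*> ref_;
    };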
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index fc9e5c9..fdf96a8 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -24,14 +24,14 @@
 namespace art {
 namespace verifier {
 
-inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
+inline RegType& RegTypeCache::GetFromId(uint16_t id) const {
   DCHECK_LT(id, entries_.size());
   RegType* result = entries_[id];
   DCHECK(result != NULL);
   return *result;
 }
 
-inline const ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
+inline ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
   // We only expect 0 to be a precise constant.
   DCHECK(value != 0 || precise);
   if (precise && (value >= kMinSmallConstant) && (value <= kMaxSmallConstant)) {
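
Note: the branch above is the fast path of FromCat1Const: precise constants in the small range map to preallocated cache entries at fixed indices, so no search is needed. A hedged sketch of the arithmetic this implies, with illustrative bounds (the real kMinSmallConstant/kMaxSmallConstant are defined in the cache header):

    #include <cassert>
    #include <cstdint>

    constexpr int32_t kMinSmallConstant = -1;   // illustrative value
    constexpr int32_t kMaxSmallConstant = 10;   // illustrative value

    // Index of a precise small constant among the preallocated entries.
    inline size_t SmallConstantSlot(int32_t value) {
      assert(value >= kMinSmallConstant && value <= kMaxSmallConstant);
      return static_cast<size_t>(value - kMinSmallConstant);
    }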
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index d51374b..255b506 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -65,8 +65,8 @@
   DCHECK_EQ(entries_.size(), primitive_count_);
 }
 
-const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
-                                            bool precise) {
+RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
+                                      bool precise) {
   DCHECK(RegTypeCache::primitive_initialized_);
   if (descriptor[1] == '\0') {
     switch (descriptor[0]) {
@@ -97,7 +97,7 @@
   }
 };
 
-const RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
+RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
   CHECK(RegTypeCache::primitive_initialized_);
   switch (prim_type) {
     case Primitive::kPrimBoolean:
@@ -156,8 +156,8 @@
   return klass;
 }
 
-const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
-                                  bool precise) {
+RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
+                            bool precise) {
   // Try looking up the class in the cache first.
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
     if (MatchDescriptor(i, descriptor, precise)) {
@@ -208,7 +208,7 @@
   }
 }
 
-const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
   DCHECK(klass != nullptr && !klass->IsErroneous());
   if (klass->IsPrimitive()) {
     // Note: precise isn't used for primitive classes. A char is assignable to an int. All
@@ -218,7 +218,7 @@
     // Look for the reference in the list of existing entries.
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
       RegType* cur_entry = entries_[i];
-      if (cur_entry->klass_ == klass && MatchingPrecisionForClass(cur_entry, precise)) {
+      if (cur_entry->klass_.Read() == klass && MatchingPrecisionForClass(cur_entry, precise)) {
         return *cur_entry;
       }
     }
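
Note: the hunk above also switches the class comparison from the raw field to klass_.Read(). With a moving collector the stored pointer may be stale, so equality has to be checked against the object's current address. A self-contained sketch with stand-in types (not ART's):

    struct Class {};  // stand-in for mirror::Class

    struct ClassRoot {
      Class* stored;
      Class* Read() { return stored; }  // the real Read() applies a read barrier
    };

    inline bool SameClass(ClassRoot& cached, Class* candidate) {
      // Compare the possibly-relocated pointer, not the stale stored value.
      return cached.Read() == candidate;
    }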
@@ -311,17 +311,15 @@
   }
 }
 
-const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
+RegType& RegTypeCache::FromUnresolvedMerge(RegType& left, RegType& right) {
   std::set<uint16_t> types;
   if (left.IsUnresolvedMergedReference()) {
-    RegType& non_const(const_cast<RegType&>(left));
-    types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
+    types = (down_cast<UnresolvedMergedType*>(&left))->GetMergedTypes();
   } else {
     types.insert(left.GetId());
   }
   if (right.IsUnresolvedMergedReference()) {
-    RegType& non_const(const_cast<RegType&>(right));
-    std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
+    std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&right))->GetMergedTypes();
     types.insert(right_types.begin(), right_types.end());
   } else {
     types.insert(right.GetId());
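
Note: making left and right non-const lets the two const_casts above disappear; down_cast (essentially a debug-checked static_cast) can then be applied directly. A minimal sketch of the simplified shape, with stand-in types:

    #include <cstdint>
    #include <set>

    struct RegType {
      virtual ~RegType() {}
      virtual bool IsUnresolvedMergedReference() const { return false; }
    };

    struct UnresolvedMergedType : RegType {
      bool IsUnresolvedMergedReference() const override { return true; }
      std::set<uint16_t> GetMergedTypes() { return merged_; }  // non-const
      std::set<uint16_t> merged_;
    };

    // With a mutable reference, no const_cast is needed before the downcast.
    std::set<uint16_t> MergedIdsOf(RegType& type) {
      if (type.IsUnresolvedMergedReference()) {
        return static_cast<UnresolvedMergedType*>(&type)->GetMergedTypes();
      }
      return {};
    }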
@@ -348,7 +346,7 @@
   return *entry;
 }
 
-const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
+RegType& RegTypeCache::FromUnresolvedSuperClass(RegType& child) {
   // Check if entry already exists.
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
     RegType* cur_entry = entries_[i];
@@ -367,7 +365,7 @@
   return *entry;
 }
 
-const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
+UninitializedType& RegTypeCache::Uninitialized(RegType& type, uint32_t allocation_pc) {
   UninitializedType* entry = NULL;
   const std::string& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
@@ -397,7 +395,7 @@
   return *entry;
 }
 
-const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
+RegType& RegTypeCache::FromUninitialized(RegType& uninit_type) {
   RegType* entry;
 
   if (uninit_type.IsUnresolvedTypes()) {
@@ -439,44 +437,44 @@
   return *entry;
 }
 
-const ImpreciseConstType& RegTypeCache::ByteConstant() {
-  const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
+ImpreciseConstType& RegTypeCache::ByteConstant() {
+  ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
   DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
+  return *down_cast<ImpreciseConstType*>(&result);
 }
 
-const ImpreciseConstType& RegTypeCache::CharConstant() {
+ImpreciseConstType& RegTypeCache::CharConstant() {
   int32_t jchar_max = static_cast<int32_t>(std::numeric_limits<jchar>::max());
-  const ConstantType& result =  FromCat1Const(jchar_max, false);
+  ConstantType& result = FromCat1Const(jchar_max, false);
   DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
+  return *down_cast<ImpreciseConstType*>(&result);
 }
 
-const ImpreciseConstType& RegTypeCache::ShortConstant() {
-  const ConstantType& result =  FromCat1Const(std::numeric_limits<jshort>::min(), false);
+ImpreciseConstType& RegTypeCache::ShortConstant() {
+  ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
   DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
+  return *down_cast<ImpreciseConstType*>(&result);
 }
 
-const ImpreciseConstType& RegTypeCache::IntConstant() {
-  const ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
+ImpreciseConstType& RegTypeCache::IntConstant() {
+  ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
   DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
+  return *down_cast<ImpreciseConstType*>(&result);
 }
 
-const ImpreciseConstType& RegTypeCache::PosByteConstant() {
-  const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
+ImpreciseConstType& RegTypeCache::PosByteConstant() {
+  ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
   DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
+  return *down_cast<ImpreciseConstType*>(&result);
 }
 
-const ImpreciseConstType& RegTypeCache::PosShortConstant() {
-  const ConstantType& result =  FromCat1Const(std::numeric_limits<jshort>::max(), false);
+ImpreciseConstType& RegTypeCache::PosShortConstant() {
+  ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::max(), false);
   DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
+  return *down_cast<ImpreciseConstType*>(&result);
 }
 
-const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
+UninitializedType& RegTypeCache::UninitializedThisArgument(RegType& type) {
   UninitializedType* entry;
   const std::string& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
@@ -502,10 +500,10 @@
   return *entry;
 }
 
-const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
     RegType* cur_entry = entries_[i];
-    if (cur_entry->klass_ == NULL && cur_entry->IsConstant() &&
+    if (cur_entry->klass_.IsNull() && cur_entry->IsConstant() &&
         cur_entry->IsPreciseConstant() == precise &&
         (down_cast<ConstantType*>(cur_entry))->ConstantValue() == value) {
       return *down_cast<ConstantType*>(cur_entry);
@@ -521,7 +519,7 @@
   return *entry;
 }
 
-const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
     RegType* cur_entry = entries_[i];
     if (cur_entry->IsConstantLo() && (cur_entry->IsPrecise() == precise) &&
@@ -539,7 +537,7 @@
   return *entry;
 }
 
-const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
     RegType* cur_entry = entries_[i];
     if (cur_entry->IsConstantHi() && (cur_entry->IsPrecise() == precise) &&
@@ -557,7 +555,7 @@
   return *entry;
 }
 
-const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) {
+RegType& RegTypeCache::GetComponentType(RegType& array, mirror::ClassLoader* loader) {
   if (!array.IsArrayTypes()) {
     return Conflict();
   } else if (array.IsUnresolvedTypes()) {
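
Note: FromCat2ConstLo and FromCat2ConstHi above cache the two 32-bit halves of a category-2 (64-bit) constant separately. A hedged sketch of the split and reassembly the verifier relies on (the same arithmetic the tests below use):

    #include <cstdint>

    struct WideConst {
      int32_t lo;  // the half FromCat2ConstLo caches
      int32_t hi;  // the half FromCat2ConstHi caches
    };

    inline WideConst Split(int64_t value) {
      return {static_cast<int32_t>(value),          // low 32 bits
              static_cast<int32_t>(value >> 32)};   // high 32 bits
    }

    inline int64_t Join(WideConst c) {
      // Zero-extend the low half so its sign bit does not leak upward.
      return (static_cast<int64_t>(c.hi) << 32) |
             static_cast<uint32_t>(c.lo);
    }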
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index f42fdd1..d46cf2c 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -49,99 +49,99 @@
     }
   }
   static void ShutDown();
-  const art::verifier::RegType& GetFromId(uint16_t id) const;
-  const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
+  RegType& GetFromId(uint16_t id) const;
+  RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
+  RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ConstantType& FromCat1Const(int32_t value, bool precise)
+  ConstantType& FromCat1Const(int32_t value, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ConstantType& FromCat2ConstLo(int32_t value, bool precise)
+  ConstantType& FromCat2ConstLo(int32_t value, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ConstantType& FromCat2ConstHi(int32_t value, bool precise)
+  ConstantType& FromCat2ConstHi(int32_t value, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
+  RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right)
+  RegType& FromUnresolvedMerge(RegType& left, RegType& right)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& FromUnresolvedSuperClass(const RegType& child)
+  RegType& FromUnresolvedSuperClass(RegType& child)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // String is final and therefore always precise.
     return From(NULL, "Ljava/lang/String;", true);
   }
-  const RegType& JavaLangThrowable(bool precise)
+  RegType& JavaLangThrowable(bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return From(NULL, "Ljava/lang/Throwable;", precise);
   }
-  const ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return FromCat1Const(0, true);
   }
-  const ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return FromCat1Const(1, true);
   }
   size_t GetCacheSize() {
     return entries_.size();
   }
-  const RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return *BooleanType::GetInstance();
   }
-  const RegType& Byte() {
+  RegType& Byte() {
     return *ByteType::GetInstance();
   }
-  const RegType& Char()  {
+  RegType& Char()  {
     return *CharType::GetInstance();
   }
-  const RegType& Short()  {
+  RegType& Short()  {
     return *ShortType::GetInstance();
   }
-  const RegType& Integer() {
+  RegType& Integer() {
     return *IntegerType::GetInstance();
   }
-  const RegType& Float() {
+  RegType& Float() {
     return *FloatType::GetInstance();
   }
-  const RegType& LongLo() {
+  RegType& LongLo() {
     return *LongLoType::GetInstance();
   }
-  const RegType& LongHi() {
+  RegType& LongHi() {
     return *LongHiType::GetInstance();
   }
-  const RegType& DoubleLo() {
+  RegType& DoubleLo() {
     return *DoubleLoType::GetInstance();
   }
-  const RegType& DoubleHi() {
+  RegType& DoubleHi() {
     return *DoubleHiType::GetInstance();
   }
-  const RegType& Undefined() {
+  RegType& Undefined() {
     return *UndefinedType::GetInstance();
   }
-  const RegType& Conflict() {
+  RegType& Conflict() {
     return *ConflictType::GetInstance();
   }
-  const RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return From(NULL, "Ljava/lang/Class;", precise);
   }
-  const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return From(NULL, "Ljava/lang/Object;", precise);
   }
-  const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc)
+  UninitializedType& Uninitialized(RegType& type, uint32_t allocation_pc)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   // Create an uninitialized 'this' argument for the given type.
-  const UninitializedType& UninitializedThisArgument(const RegType& type)
+  UninitializedType& UninitializedThisArgument(RegType& type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& FromUninitialized(const RegType& uninit_type)
+  RegType& FromUninitialized(RegType& uninit_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader)
+  ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  RegType& GetComponentType(RegType& array, mirror::ClassLoader* loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
+  RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
 
   void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -151,7 +151,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool MatchDescriptor(size_t idx, const char* descriptor, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
+  ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void AddEntry(RegType* new_entry);
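
Note: this header is where the change fans out: every factory and accessor now returns RegType& (or a subclass reference) instead of const RegType&. The caller-side consequence is purely mechanical. A hedged usage sketch against the declarations above, under the same locking rules:

    #include <string>

    std::string DescribeBoolean(RegTypeCache& cache) {
      RegType& t = cache.Boolean();  // was: const RegType& t = cache.Boolean();
      return t.Dump();               // legal now that Dump() is non-const
    }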
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 9dc0df1..e27558a 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -33,21 +33,21 @@
   // Tests creating primitive types.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
-  const RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
-  const RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
-  const RegType& ref_type_const_3 = cache.FromCat1Const(30, false);
+  RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
+  RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
+  RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
+  RegType& ref_type_const_3 = cache.FromCat1Const(30, false);
   EXPECT_TRUE(ref_type_const_0.Equals(ref_type_const_1));
   EXPECT_FALSE(ref_type_const_0.Equals(ref_type_const_2));
   EXPECT_FALSE(ref_type_const_0.Equals(ref_type_const_3));
 
-  const RegType& ref_type_const_wide_0 = cache.FromCat2ConstHi(50, true);
-  const RegType& ref_type_const_wide_1 = cache.FromCat2ConstHi(50, true);
+  RegType& ref_type_const_wide_0 = cache.FromCat2ConstHi(50, true);
+  RegType& ref_type_const_wide_1 = cache.FromCat2ConstHi(50, true);
   EXPECT_TRUE(ref_type_const_wide_0.Equals(ref_type_const_wide_1));
 
-  const RegType& ref_type_const_wide_2 = cache.FromCat2ConstLo(50, true);
-  const RegType& ref_type_const_wide_3 = cache.FromCat2ConstLo(50, true);
-  const RegType& ref_type_const_wide_4 = cache.FromCat2ConstLo(55, true);
+  RegType& ref_type_const_wide_2 = cache.FromCat2ConstLo(50, true);
+  RegType& ref_type_const_wide_3 = cache.FromCat2ConstLo(50, true);
+  RegType& ref_type_const_wide_4 = cache.FromCat2ConstLo(55, true);
   EXPECT_TRUE(ref_type_const_wide_2.Equals(ref_type_const_wide_3));
   EXPECT_FALSE(ref_type_const_wide_2.Equals(ref_type_const_wide_4));
 }
@@ -56,11 +56,11 @@
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
   int64_t val = static_cast<int32_t>(1234);
-  const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
-  const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
-  const RegType& precise_const = cache.FromCat1Const(static_cast<int32_t>(val >> 32), true);
-  const RegType& long_lo = cache.LongLo();
-  const RegType& long_hi = cache.LongHi();
+  RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
+  RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+  RegType& precise_const = cache.FromCat1Const(static_cast<int32_t>(val >> 32), true);
+  RegType& long_lo = cache.LongLo();
+  RegType& long_hi = cache.LongHi();
   // Check sanity of types.
   EXPECT_TRUE(precise_lo.IsLowHalf());
   EXPECT_FALSE(precise_hi.IsLowHalf());
@@ -80,7 +80,7 @@
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
 
-  const RegType& bool_reg_type = cache.Boolean();
+  RegType& bool_reg_type = cache.Boolean();
   EXPECT_FALSE(bool_reg_type.IsUndefined());
   EXPECT_FALSE(bool_reg_type.IsConflict());
   EXPECT_FALSE(bool_reg_type.IsZero());
@@ -112,7 +112,7 @@
   EXPECT_TRUE(bool_reg_type.IsArrayIndexTypes());
   EXPECT_FALSE(bool_reg_type.IsNonZeroReferenceTypes());
 
-  const RegType& byte_reg_type = cache.Byte();
+  RegType& byte_reg_type = cache.Byte();
   EXPECT_FALSE(byte_reg_type.IsUndefined());
   EXPECT_FALSE(byte_reg_type.IsConflict());
   EXPECT_FALSE(byte_reg_type.IsZero());
@@ -144,7 +144,7 @@
   EXPECT_TRUE(byte_reg_type.IsArrayIndexTypes());
   EXPECT_FALSE(byte_reg_type.IsNonZeroReferenceTypes());
 
-  const RegType& char_reg_type = cache.Char();
+  RegType& char_reg_type = cache.Char();
   EXPECT_FALSE(char_reg_type.IsUndefined());
   EXPECT_FALSE(char_reg_type.IsConflict());
   EXPECT_FALSE(char_reg_type.IsZero());
@@ -176,7 +176,7 @@
   EXPECT_TRUE(char_reg_type.IsArrayIndexTypes());
   EXPECT_FALSE(char_reg_type.IsNonZeroReferenceTypes());
 
-  const RegType& short_reg_type = cache.Short();
+  RegType& short_reg_type = cache.Short();
   EXPECT_FALSE(short_reg_type.IsUndefined());
   EXPECT_FALSE(short_reg_type.IsConflict());
   EXPECT_FALSE(short_reg_type.IsZero());
@@ -208,7 +208,7 @@
   EXPECT_TRUE(short_reg_type.IsArrayIndexTypes());
   EXPECT_FALSE(short_reg_type.IsNonZeroReferenceTypes());
 
-  const RegType& int_reg_type = cache.Integer();
+  RegType& int_reg_type = cache.Integer();
   EXPECT_FALSE(int_reg_type.IsUndefined());
   EXPECT_FALSE(int_reg_type.IsConflict());
   EXPECT_FALSE(int_reg_type.IsZero());
@@ -240,7 +240,7 @@
   EXPECT_TRUE(int_reg_type.IsArrayIndexTypes());
   EXPECT_FALSE(int_reg_type.IsNonZeroReferenceTypes());
 
-  const RegType& long_reg_type = cache.LongLo();
+  RegType& long_reg_type = cache.LongLo();
   EXPECT_FALSE(long_reg_type.IsUndefined());
   EXPECT_FALSE(long_reg_type.IsConflict());
   EXPECT_FALSE(long_reg_type.IsZero());
@@ -272,7 +272,7 @@
   EXPECT_FALSE(long_reg_type.IsArrayIndexTypes());
   EXPECT_FALSE(long_reg_type.IsNonZeroReferenceTypes());
 
-  const RegType& float_reg_type = cache.Float();
+  RegType& float_reg_type = cache.Float();
   EXPECT_FALSE(float_reg_type.IsUndefined());
   EXPECT_FALSE(float_reg_type.IsConflict());
   EXPECT_FALSE(float_reg_type.IsZero());
@@ -304,7 +304,7 @@
   EXPECT_FALSE(float_reg_type.IsArrayIndexTypes());
   EXPECT_FALSE(float_reg_type.IsNonZeroReferenceTypes());
 
-  const RegType& double_reg_type = cache.DoubleLo();
+  RegType& double_reg_type = cache.DoubleLo();
   EXPECT_FALSE(double_reg_type.IsUndefined());
   EXPECT_FALSE(double_reg_type.IsConflict());
   EXPECT_FALSE(double_reg_type.IsZero());
@@ -344,9 +344,9 @@
   // match the one that is imprecise.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& imprecise_obj = cache.JavaLangObject(false);
-  const RegType& precise_obj = cache.JavaLangObject(true);
-  const RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+  RegType& imprecise_obj = cache.JavaLangObject(false);
+  RegType& precise_obj = cache.JavaLangObject(true);
+  RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
 
   EXPECT_TRUE(precise_obj.Equals(precise_obj_2));
   EXPECT_FALSE(imprecise_obj.Equals(precise_obj));
@@ -359,14 +359,14 @@
   // a hit second time.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
   EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
 
-  const RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.Equals(ref_type_1));
 
-  const RegType& unresolved_super_class =  cache.FromUnresolvedSuperClass(ref_type_0);
+  RegType& unresolved_super_class = cache.FromUnresolvedSuperClass(ref_type_0);
   EXPECT_TRUE(unresolved_super_class.IsUnresolvedSuperClass());
   EXPECT_TRUE(unresolved_super_class.IsNonZeroReferenceTypes());
 }
@@ -375,21 +375,21 @@
   // Tests creating uninitialized types from unresolved types.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
-  const RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.Equals(ref_type));
   // Create an uninitialized type of this unresolved type
-  const RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
+  RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
   EXPECT_TRUE(unresolved_unintialised.IsUnresolvedAndUninitializedReference());
   EXPECT_TRUE(unresolved_unintialised.IsUninitializedTypes());
   EXPECT_TRUE(unresolved_unintialised.IsNonZeroReferenceTypes());
   // Create an uninitialized type of this unresolved type with a different PC.
-  const RegType& ref_type_unresolved_unintialised_1 =  cache.Uninitialized(ref_type, 1102ull);
+  RegType& ref_type_unresolved_unintialised_1 = cache.Uninitialized(ref_type, 1102ull);
   EXPECT_TRUE(unresolved_unintialised.IsUnresolvedAndUninitializedReference());
   EXPECT_FALSE(unresolved_unintialised.Equals(ref_type_unresolved_unintialised_1));
   // Create an uninitialized type of this unresolved type with the same PC
-  const RegType& unresolved_unintialised_2 = cache.Uninitialized(ref_type, 1101ull);
+  RegType& unresolved_unintialised_2 = cache.Uninitialized(ref_type, 1101ull);
   EXPECT_TRUE(unresolved_unintialised.Equals(unresolved_unintialised_2));
 }
 
@@ -397,12 +397,12 @@
   // Tests types for proper Dump messages.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
-  const RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
-  const RegType& resolved_ref = cache.JavaLangString();
-  const RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
-  const RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
-  const RegType& unresolved_merged = cache.FromUnresolvedMerge(unresolved_ref, unresolved_ref_another);
+  RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
+  RegType& resolved_ref = cache.JavaLangString();
+  RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
+  RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
+  RegType& unresolved_merged = cache.FromUnresolvedMerge(unresolved_ref, unresolved_ref_another);
 
   std::string expected = "Unresolved Reference: java.lang.DoesNotExist";
   EXPECT_EQ(expected, unresolved_ref.Dump());
@@ -422,16 +422,16 @@
   // The JavaLangObject method instead of FromDescriptor. String class is final.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& ref_type = cache.JavaLangString();
-  const RegType& ref_type_2 = cache.JavaLangString();
-  const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
+  RegType& ref_type = cache.JavaLangString();
+  RegType& ref_type_2 = cache.JavaLangString();
+  RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
 
   EXPECT_TRUE(ref_type.Equals(ref_type_2));
   EXPECT_TRUE(ref_type_2.Equals(ref_type_3));
   EXPECT_TRUE(ref_type.IsPreciseReference());
 
   // Create an uninitialized type out of this:
-  const RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
+  RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
   EXPECT_TRUE(ref_type_unintialized.IsUninitializedReference());
   EXPECT_FALSE(ref_type_unintialized.IsUnresolvedAndUninitializedReference());
 }
@@ -442,9 +442,9 @@
   // The JavaLangObject method instead of FromDescriptor. Object class is not final.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& ref_type = cache.JavaLangObject(true);
-  const RegType& ref_type_2 = cache.JavaLangObject(true);
-  const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+  RegType& ref_type = cache.JavaLangObject(true);
+  RegType& ref_type_2 = cache.JavaLangObject(true);
+  RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
 
   EXPECT_TRUE(ref_type.Equals(ref_type_2));
   EXPECT_TRUE(ref_type_3.Equals(ref_type_2));
@@ -455,20 +455,19 @@
   // String and object, LUB is object.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache_new(true);
-  const RegType& string = cache_new.JavaLangString();
-  const RegType& Object = cache_new.JavaLangObject(true);
+  RegType& string = cache_new.JavaLangString();
+  RegType& Object = cache_new.JavaLangObject(true);
   EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject());
   // Merge two unresolved types.
-  const RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
-  const RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
+  RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
   EXPECT_FALSE(ref_type_0.Equals(ref_type_1));
 
-  const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
+  RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
   EXPECT_TRUE(merged.IsUnresolvedMergedReference());
-  RegType& merged_nonconst = const_cast<RegType&>(merged);
 
-  std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged_nonconst))->GetMergedTypes();
+  std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged))->GetMergedTypes();
   EXPECT_EQ(ref_type_0.GetId(), *(merged_ids.begin()));
   EXPECT_EQ(ref_type_1.GetId(), *((++merged_ids.begin())));
 }
@@ -479,27 +478,27 @@
   RegTypeCache cache_new(true);
 
   constexpr int32_t kTestConstantValue = 10;
-  const RegType& float_type = cache_new.Float();
-  const RegType& precise_cst = cache_new.FromCat1Const(kTestConstantValue, true);
-  const RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
+  RegType& float_type = cache_new.Float();
+  RegType& precise_cst = cache_new.FromCat1Const(kTestConstantValue, true);
+  RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
   {
     // float MERGE precise cst => float.
-    const RegType& merged = float_type.Merge(precise_cst, &cache_new);
+    RegType& merged = float_type.Merge(precise_cst, &cache_new);
     EXPECT_TRUE(merged.IsFloat());
   }
   {
     // precise cst MERGE float => float.
-    const RegType& merged = precise_cst.Merge(float_type, &cache_new);
+    RegType& merged = precise_cst.Merge(float_type, &cache_new);
     EXPECT_TRUE(merged.IsFloat());
   }
   {
     // float MERGE imprecise cst => float.
-    const RegType& merged = float_type.Merge(imprecise_cst, &cache_new);
+    RegType& merged = float_type.Merge(imprecise_cst, &cache_new);
     EXPECT_TRUE(merged.IsFloat());
   }
   {
     // imprecise cst MERGE float => float.
-    const RegType& merged = imprecise_cst.Merge(float_type, &cache_new);
+    RegType& merged = imprecise_cst.Merge(float_type, &cache_new);
     EXPECT_TRUE(merged.IsFloat());
   }
 }
@@ -510,50 +509,50 @@
   RegTypeCache cache_new(true);
 
   constexpr int32_t kTestConstantValue = 10;
-  const RegType& long_lo_type = cache_new.LongLo();
-  const RegType& long_hi_type = cache_new.LongHi();
-  const RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
-  const RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
-  const RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
-  const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
+  RegType& long_lo_type = cache_new.LongLo();
+  RegType& long_hi_type = cache_new.LongHi();
+  RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
+  RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
+  RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
+  RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
   {
     // lo MERGE precise cst lo => lo.
-    const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new);
+    RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // precise cst lo MERGE lo => lo.
-    const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new);
+    RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // lo MERGE imprecise cst lo => lo.
-    const RegType& merged = long_lo_type.Merge(imprecise_cst_lo, &cache_new);
+    RegType& merged = long_lo_type.Merge(imprecise_cst_lo, &cache_new);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // imprecise cst lo MERGE lo => lo.
-    const RegType& merged = imprecise_cst_lo.Merge(long_lo_type, &cache_new);
+    RegType& merged = imprecise_cst_lo.Merge(long_lo_type, &cache_new);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // hi MERGE precise cst hi => hi.
-    const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new);
+    RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new);
     EXPECT_TRUE(merged.IsLongHi());
   }
   {
     // precise cst hi MERGE hi => hi.
-    const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new);
+    RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new);
     EXPECT_TRUE(merged.IsLongHi());
   }
   {
     // hi MERGE imprecise cst hi => hi.
-    const RegType& merged = long_hi_type.Merge(imprecise_cst_hi, &cache_new);
+    RegType& merged = long_hi_type.Merge(imprecise_cst_hi, &cache_new);
     EXPECT_TRUE(merged.IsLongHi());
   }
   {
     // imprecise cst hi MERGE hi => hi.
-    const RegType& merged = imprecise_cst_hi.Merge(long_hi_type, &cache_new);
+    RegType& merged = imprecise_cst_hi.Merge(long_hi_type, &cache_new);
     EXPECT_TRUE(merged.IsLongHi());
   }
 }
@@ -564,50 +563,50 @@
   RegTypeCache cache_new(true);
 
   constexpr int32_t kTestConstantValue = 10;
-  const RegType& double_lo_type = cache_new.DoubleLo();
-  const RegType& double_hi_type = cache_new.DoubleHi();
-  const RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
-  const RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
-  const RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
-  const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
+  RegType& double_lo_type = cache_new.DoubleLo();
+  RegType& double_hi_type = cache_new.DoubleHi();
+  RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
+  RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
+  RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
+  RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
   {
     // lo MERGE precise cst lo => lo.
-    const RegType& merged = double_lo_type.Merge(precise_cst_lo, &cache_new);
+    RegType& merged = double_lo_type.Merge(precise_cst_lo, &cache_new);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // precise cst lo MERGE lo => lo.
-    const RegType& merged = precise_cst_lo.Merge(double_lo_type, &cache_new);
+    RegType& merged = precise_cst_lo.Merge(double_lo_type, &cache_new);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // lo MERGE imprecise cst lo => lo.
-    const RegType& merged = double_lo_type.Merge(imprecise_cst_lo, &cache_new);
+    RegType& merged = double_lo_type.Merge(imprecise_cst_lo, &cache_new);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // imprecise cst lo MERGE lo => lo.
-    const RegType& merged = imprecise_cst_lo.Merge(double_lo_type, &cache_new);
+    RegType& merged = imprecise_cst_lo.Merge(double_lo_type, &cache_new);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // hi MERGE precise cst hi => hi.
-    const RegType& merged = double_hi_type.Merge(precise_cst_hi, &cache_new);
+    RegType& merged = double_hi_type.Merge(precise_cst_hi, &cache_new);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
   {
     // precise cst hi MERGE hi => hi.
-    const RegType& merged = precise_cst_hi.Merge(double_hi_type, &cache_new);
+    RegType& merged = precise_cst_hi.Merge(double_hi_type, &cache_new);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
   {
     // hi MERGE imprecise cst hi => hi.
-    const RegType& merged = double_hi_type.Merge(imprecise_cst_hi, &cache_new);
+    RegType& merged = double_hi_type.Merge(imprecise_cst_hi, &cache_new);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
   {
     // imprecise cst hi MERGE hi => hi.
-    const RegType& merged = imprecise_cst_hi.Merge(double_hi_type, &cache_new);
+    RegType& merged = imprecise_cst_hi.Merge(double_hi_type, &cache_new);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
 }
@@ -616,8 +615,8 @@
   // Tests creating primitive types.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache_new(true);
-  const RegType& imprecise_const = cache_new.FromCat1Const(10, false);
-  const RegType& precise_const = cache_new.FromCat1Const(10, true);
+  RegType& imprecise_const = cache_new.FromCat1Const(10, false);
+  RegType& precise_const = cache_new.FromCat1Const(10, true);
 
   EXPECT_TRUE(imprecise_const.IsImpreciseConstant());
   EXPECT_TRUE(precise_const.IsPreciseConstant());
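
Note: the last test above leans on the precise/imprecise split: a precise constant models one exact value, while an imprecise constant only promises a value of that shape (the ByteConstant()/CharConstant() helpers earlier cache range extremes with precise == false). A stand-in sketch of the equality the first test in this file asserts:

    #include <cstdint>

    struct Cat1Const {
      int32_t value;
      bool precise;
    };

    // Category-1 constants match on value and precision, which is why
    // FromCat1Const(30, true) and FromCat1Const(30, false) differ above.
    inline bool ConstEquals(const Cat1Const& a, const Cat1Const& b) {
      return a.value == b.value && a.precise == b.precise;
    }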
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 0989cd0..378c6d3 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -25,7 +25,7 @@
 namespace art {
 namespace verifier {
 
-inline const RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
+inline RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
   // The register index was validated during the static pass, so we don't need to check it here.
   DCHECK_LT(vsrc, num_regs_);
   return verifier_->GetRegTypeCache()->GetFromId(line_[vsrc]);
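
Note: GetRegisterType() above is a two-step indirection: the register line stores one uint16_t cache id per Dalvik register, and the cache turns ids back into canonical RegType instances. A stand-in sketch of the storage side:

    #include <cstdint>
    #include <vector>

    struct LineSketch {
      std::vector<uint16_t> ids;  // plays the role of line_ above

      uint16_t IdOf(uint32_t vsrc) const {
        // The real code only DCHECKs the index; bounds were validated
        // during the verifier's static pass.
        return ids[vsrc];
      }
    };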
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 556056c..4d67cfb 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -36,7 +36,7 @@
   return true;
 }
 
-bool RegisterLine::SetRegisterType(uint32_t vdst, const RegType& new_type) {
+bool RegisterLine::SetRegisterType(uint32_t vdst, RegType& new_type) {
   DCHECK_LT(vdst, num_regs_);
   if (new_type.IsLowHalf() || new_type.IsHighHalf()) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected category1 register type not '"
@@ -53,8 +53,8 @@
   return true;
 }
 
-bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1,
-                                       const RegType& new_type2) {
+bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, RegType& new_type1,
+                                       RegType& new_type2) {
   DCHECK_LT(vdst + 1, num_regs_);
   if (!new_type1.CheckWidePair(new_type2)) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Invalid wide pair '"
@@ -75,21 +75,21 @@
   result_[1] = result_[0];
 }
 
-void RegisterLine::SetResultRegisterType(const RegType& new_type) {
+void RegisterLine::SetResultRegisterType(RegType& new_type) {
   DCHECK(!new_type.IsLowHalf());
   DCHECK(!new_type.IsHighHalf());
   result_[0] = new_type.GetId();
   result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
 }
 
-void RegisterLine::SetResultRegisterTypeWide(const RegType& new_type1,
-                                             const RegType& new_type2) {
+void RegisterLine::SetResultRegisterTypeWide(RegType& new_type1,
+                                             RegType& new_type2) {
   DCHECK(new_type1.CheckWidePair(new_type2));
   result_[0] = new_type1.GetId();
   result_[1] = new_type2.GetId();
 }
 
-const RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
+RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
   const size_t args_count = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
   if (args_count < 1) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
@@ -97,7 +97,7 @@
   }
   /* Get the 'this' register, which is in vC for both invoke forms */
   const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
-  const RegType& this_type = GetRegisterType(this_reg);
+  RegType& this_type = GetRegisterType(this_reg);
   if (!this_type.IsReferenceTypes()) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "tried to get class from non-reference register v"
                                                  << this_reg << " (type=" << this_type << ")";
@@ -107,9 +107,9 @@
 }
 
 bool RegisterLine::VerifyRegisterType(uint32_t vsrc,
-                                      const RegType& check_type) {
+                                      RegType& check_type) {
   // Verify the src register type against the check type, refining the type of the register.
-  const RegType& src_type = GetRegisterType(vsrc);
+  RegType& src_type = GetRegisterType(vsrc);
   if (!(check_type.IsAssignableFrom(src_type))) {
     enum VerifyError fail_type;
     if (!check_type.IsNonZeroReferenceTypes() || !src_type.IsNonZeroReferenceTypes()) {
@@ -125,7 +125,7 @@
     return false;
   }
   if (check_type.IsLowHalf()) {
-    const RegType& src_type_h = GetRegisterType(vsrc + 1);
+    RegType& src_type_h = GetRegisterType(vsrc + 1);
     if (!src_type.CheckWidePair(src_type_h)) {
       verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
                                                    << src_type << "/" << src_type_h;
@@ -139,17 +139,17 @@
   return true;
 }
 
-bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1,
-                                          const RegType& check_type2) {
+bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, RegType& check_type1,
+                                          RegType& check_type2) {
   DCHECK(check_type1.CheckWidePair(check_type2));
   // Verify the src register type against the check type, refining the type of the register.
-  const RegType& src_type = GetRegisterType(vsrc);
+  RegType& src_type = GetRegisterType(vsrc);
   if (!check_type1.IsAssignableFrom(src_type)) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << vsrc << " has type " << src_type
                                << " but expected " << check_type1;
     return false;
   }
-  const RegType& src_type_h = GetRegisterType(vsrc + 1);
+  RegType& src_type_h = GetRegisterType(vsrc + 1);
   if (!src_type.CheckWidePair(src_type_h)) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
         << src_type << "/" << src_type_h;
@@ -162,9 +162,9 @@
   return true;
 }
 
-void RegisterLine::MarkRefsAsInitialized(const RegType& uninit_type) {
+void RegisterLine::MarkRefsAsInitialized(RegType& uninit_type) {
   DCHECK(uninit_type.IsUninitializedTypes());
-  const RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
+  RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
   size_t changed = 0;
   for (uint32_t i = 0; i < num_regs_; i++) {
     if (GetRegisterType(i).Equals(uninit_type)) {
@@ -200,7 +200,7 @@
   }
 }
 
-std::string RegisterLine::Dump() const {
+std::string RegisterLine::Dump() {
   std::string result;
   for (size_t i = 0; i < num_regs_; i++) {
     result += StringPrintf("%zd:[", i);
@@ -213,7 +213,7 @@
   return result;
 }
 
-void RegisterLine::MarkUninitRefsAsInvalid(const RegType& uninit_type) {
+void RegisterLine::MarkUninitRefsAsInvalid(RegType& uninit_type) {
   for (size_t i = 0; i < num_regs_; i++) {
     if (GetRegisterType(i).Equals(uninit_type)) {
       line_[i] = verifier_->GetRegTypeCache()->Conflict().GetId();
@@ -224,7 +224,7 @@
 
 void RegisterLine::CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat) {
   DCHECK(cat == kTypeCategory1nr || cat == kTypeCategoryRef);
-  const RegType& type = GetRegisterType(vsrc);
+  RegType& type = GetRegisterType(vsrc);
   if (!SetRegisterType(vdst, type)) {
     return;
   }
@@ -238,8 +238,8 @@
 }
 
 void RegisterLine::CopyRegister2(uint32_t vdst, uint32_t vsrc) {
-  const RegType& type_l = GetRegisterType(vsrc);
-  const RegType& type_h = GetRegisterType(vsrc + 1);
+  RegType& type_l = GetRegisterType(vsrc);
+  RegType& type_h = GetRegisterType(vsrc + 1);
 
   if (!type_l.CheckWidePair(type_h)) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy2 v" << vdst << "<-v" << vsrc
@@ -250,7 +250,7 @@
 }
 
 void RegisterLine::CopyResultRegister1(uint32_t vdst, bool is_reference) {
-  const RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+  RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
   if ((!is_reference && !type.IsCategory1Types()) ||
       (is_reference && !type.IsReferenceTypes())) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
@@ -267,8 +267,8 @@
  * register to another register, and reset the result register.
  */
 void RegisterLine::CopyResultRegister2(uint32_t vdst) {
-  const RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
-  const RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
+  RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+  RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
   if (!type_l.IsCategory2Types()) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
         << "copyRes2 v" << vdst << "<- result0"  << " type=" << type_l;
@@ -281,40 +281,40 @@
 }
 
 void RegisterLine::CheckUnaryOp(const Instruction* inst,
-                                const RegType& dst_type,
-                                const RegType& src_type) {
+                                RegType& dst_type,
+                                RegType& src_type) {
   if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
     SetRegisterType(inst->VRegA_12x(), dst_type);
   }
 }
 
 void RegisterLine::CheckUnaryOpWide(const Instruction* inst,
-                                    const RegType& dst_type1, const RegType& dst_type2,
-                                    const RegType& src_type1, const RegType& src_type2) {
+                                    RegType& dst_type1, RegType& dst_type2,
+                                    RegType& src_type1, RegType& src_type2) {
   if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
     SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
   }
 }
 
 void RegisterLine::CheckUnaryOpToWide(const Instruction* inst,
-                                      const RegType& dst_type1, const RegType& dst_type2,
-                                      const RegType& src_type) {
+                                      RegType& dst_type1, RegType& dst_type2,
+                                      RegType& src_type) {
   if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
     SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
   }
 }
 
 void RegisterLine::CheckUnaryOpFromWide(const Instruction* inst,
-                                        const RegType& dst_type,
-                                        const RegType& src_type1, const RegType& src_type2) {
+                                        RegType& dst_type,
+                                        RegType& src_type1, RegType& src_type2) {
   if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
     SetRegisterType(inst->VRegA_12x(), dst_type);
   }
 }
 
 void RegisterLine::CheckBinaryOp(const Instruction* inst,
-                                 const RegType& dst_type,
-                                 const RegType& src_type1, const RegType& src_type2,
+                                 RegType& dst_type,
+                                 RegType& src_type1, RegType& src_type2,
                                  bool check_boolean_op) {
   const uint32_t vregB = inst->VRegB_23x();
   const uint32_t vregC = inst->VRegC_23x();
@@ -333,9 +333,9 @@
 }
 
 void RegisterLine::CheckBinaryOpWide(const Instruction* inst,
-                                     const RegType& dst_type1, const RegType& dst_type2,
-                                     const RegType& src_type1_1, const RegType& src_type1_2,
-                                     const RegType& src_type2_1, const RegType& src_type2_2) {
+                                     RegType& dst_type1, RegType& dst_type2,
+                                     RegType& src_type1_1, RegType& src_type1_2,
+                                     RegType& src_type2_1, RegType& src_type2_2) {
   if (VerifyRegisterTypeWide(inst->VRegB_23x(), src_type1_1, src_type1_2) &&
       VerifyRegisterTypeWide(inst->VRegC_23x(), src_type2_1, src_type2_2)) {
     SetRegisterTypeWide(inst->VRegA_23x(), dst_type1, dst_type2);
@@ -343,8 +343,8 @@
 }
 
 void RegisterLine::CheckBinaryOpWideShift(const Instruction* inst,
-                                          const RegType& long_lo_type, const RegType& long_hi_type,
-                                          const RegType& int_type) {
+                                          RegType& long_lo_type, RegType& long_hi_type,
+                                          RegType& int_type) {
   if (VerifyRegisterTypeWide(inst->VRegB_23x(), long_lo_type, long_hi_type) &&
       VerifyRegisterType(inst->VRegC_23x(), int_type)) {
     SetRegisterTypeWide(inst->VRegA_23x(), long_lo_type, long_hi_type);
@@ -352,8 +352,8 @@
 }
 
 void RegisterLine::CheckBinaryOp2addr(const Instruction* inst,
-                                      const RegType& dst_type, const RegType& src_type1,
-                                      const RegType& src_type2, bool check_boolean_op) {
+                                      RegType& dst_type, RegType& src_type1,
+                                      RegType& src_type2, bool check_boolean_op) {
   const uint32_t vregA = inst->VRegA_12x();
   const uint32_t vregB = inst->VRegB_12x();
   if (VerifyRegisterType(vregA, src_type1) &&
@@ -371,9 +371,9 @@
 }
 
 void RegisterLine::CheckBinaryOp2addrWide(const Instruction* inst,
-                                          const RegType& dst_type1, const RegType& dst_type2,
-                                          const RegType& src_type1_1, const RegType& src_type1_2,
-                                          const RegType& src_type2_1, const RegType& src_type2_2) {
+                                          RegType& dst_type1, RegType& dst_type2,
+                                          RegType& src_type1_1, RegType& src_type1_2,
+                                          RegType& src_type2_1, RegType& src_type2_2) {
   const uint32_t vregA = inst->VRegA_12x();
   const uint32_t vregB = inst->VRegB_12x();
   if (VerifyRegisterTypeWide(vregA, src_type1_1, src_type1_2) &&
@@ -383,8 +383,8 @@
 }
 
 void RegisterLine::CheckBinaryOp2addrWideShift(const Instruction* inst,
-                                               const RegType& long_lo_type, const RegType& long_hi_type,
-                                               const RegType& int_type) {
+                                               RegType& long_lo_type, RegType& long_hi_type,
+                                               RegType& int_type) {
   const uint32_t vregA = inst->VRegA_12x();
   const uint32_t vregB = inst->VRegB_12x();
   if (VerifyRegisterTypeWide(vregA, long_lo_type, long_hi_type) &&
@@ -394,7 +394,7 @@
 }
 
 void RegisterLine::CheckLiteralOp(const Instruction* inst,
-                                  const RegType& dst_type, const RegType& src_type,
+                                  RegType& dst_type, RegType& src_type,
                                   bool check_boolean_op, bool is_lit16) {
   const uint32_t vregA = is_lit16 ? inst->VRegA_22s() : inst->VRegA_22b();
   const uint32_t vregB = is_lit16 ? inst->VRegB_22s() : inst->VRegB_22b();
@@ -413,7 +413,7 @@
 }
 
 void RegisterLine::PushMonitor(uint32_t reg_idx, int32_t insn_idx) {
-  const RegType& reg_type = GetRegisterType(reg_idx);
+  RegType& reg_type = GetRegisterType(reg_idx);
   if (!reg_type.IsReferenceTypes()) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter on non-object (" << reg_type << ")";
   } else if (monitors_.size() >= 32) {
@@ -425,7 +425,7 @@
 }
 
 void RegisterLine::PopMonitor(uint32_t reg_idx) {
-  const RegType& reg_type = GetRegisterType(reg_idx);
+  RegType& reg_type = GetRegisterType(reg_idx);
   if (!reg_type.IsReferenceTypes()) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit on non-object (" << reg_type << ")";
   } else if (monitors_.empty()) {
@@ -460,9 +460,9 @@
   DCHECK(incoming_line != nullptr);
   for (size_t idx = 0; idx < num_regs_; idx++) {
     if (line_[idx] != incoming_line->line_[idx]) {
-      const RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
-      const RegType& cur_type = GetRegisterType(idx);
-      const RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
+      RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
+      RegType& cur_type = GetRegisterType(idx);
+      RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
       changed = changed || !cur_type.Equals(new_type);
       line_[idx] = new_type.GetId();
     }
@@ -508,7 +508,8 @@
 
 std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  os << rhs.Dump();
+  RegisterLine& rhs_non_const = const_cast<RegisterLine&>(rhs);
+  os << rhs_non_const.Dump();
   return os;
 }
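
Note: the operator<< change above is the one place this patch introduces a const_cast rather than removing one: streaming must keep taking a const reference, but Dump() is no longer const. A self-contained sketch of the same pattern with a stand-in type:

    #include <ostream>
    #include <string>

    struct LinePrinter {
      std::string Dump() { return "..."; }  // non-const, like RegisterLine::Dump
    };

    inline std::ostream& operator<<(std::ostream& os, const LinePrinter& rhs) {
      // Casting away const is confined to this adapter; Dump() does not
      // mutate any printable state in this sketch.
      return os << const_cast<LinePrinter&>(rhs).Dump();
    }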
 
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 57c7517..b0018d2 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -81,26 +81,26 @@
   // Set the type of register N, verifying that the register is valid.  If "newType" is the "Lo"
   // part of a 64-bit value, register N+1 will be set to "newType+1".
   // The register index was validated during the static pass, so we don't need to check it here.
-  bool SetRegisterType(uint32_t vdst, const RegType& new_type)
+  bool SetRegisterType(uint32_t vdst, RegType& new_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1, const RegType& new_type2)
+  bool SetRegisterTypeWide(uint32_t vdst, RegType& new_type1, RegType& new_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /* Set the type of the "result" register. */
-  void SetResultRegisterType(const RegType& new_type)
+  void SetResultRegisterType(RegType& new_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2)
+  void SetResultRegisterTypeWide(RegType& new_type1, RegType& new_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get the type of register vsrc.
-  const RegType& GetRegisterType(uint32_t vsrc) const;
+  RegType& GetRegisterType(uint32_t vsrc) const;
 
-  bool VerifyRegisterType(uint32_t vsrc, const RegType& check_type)
+  bool VerifyRegisterType(uint32_t vsrc, RegType& check_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1, const RegType& check_type2)
+  bool VerifyRegisterTypeWide(uint32_t vsrc, RegType& check_type1, RegType& check_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CopyFromLine(const RegisterLine* src) {
@@ -110,7 +110,7 @@
     reg_to_lock_depths_ = src->reg_to_lock_depths_;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void FillWithGarbage() {
     memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t));
@@ -126,7 +126,7 @@
    * to prevent them from being used (otherwise, MarkRefsAsInitialized would mark the old ones and
    * the new ones at the same time).
    */
-  void MarkUninitRefsAsInvalid(const RegType& uninit_type)
+  void MarkUninitRefsAsInvalid(RegType& uninit_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -134,7 +134,7 @@
    * reference type. This is called when an appropriate constructor is invoked -- all copies of
    * the reference must be marked as initialized.
    */
-  void MarkRefsAsInitialized(const RegType& uninit_type)
+  void MarkRefsAsInitialized(RegType& uninit_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -173,30 +173,30 @@
    * The argument count is in vA, and the first argument is in vC, for both "simple" and "range"
    * versions. We just need to make sure vA is >= 1 and then return vC.
    */
-  const RegType& GetInvocationThis(const Instruction* inst, bool is_range)
+  RegType& GetInvocationThis(const Instruction* inst, bool is_range)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
    * Verify types for a simple two-register instruction (e.g. "neg-int").
    * "dst_type" is stored into vA, and "src_type" is verified against vB.
    */
-  void CheckUnaryOp(const Instruction* inst, const RegType& dst_type,
-                    const RegType& src_type)
+  void CheckUnaryOp(const Instruction* inst, RegType& dst_type,
+                    RegType& src_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckUnaryOpWide(const Instruction* inst,
-                        const RegType& dst_type1, const RegType& dst_type2,
-                        const RegType& src_type1, const RegType& src_type2)
+                        RegType& dst_type1, RegType& dst_type2,
+                        RegType& src_type1, RegType& src_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckUnaryOpToWide(const Instruction* inst,
-                          const RegType& dst_type1, const RegType& dst_type2,
-                          const RegType& src_type)
+                          RegType& dst_type1, RegType& dst_type2,
+                          RegType& src_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckUnaryOpFromWide(const Instruction* inst,
-                            const RegType& dst_type,
-                            const RegType& src_type1, const RegType& src_type2)
+                            RegType& dst_type,
+                            RegType& src_type1, RegType& src_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -205,19 +205,19 @@
    * against vB/vC.
    */
   void CheckBinaryOp(const Instruction* inst,
-                     const RegType& dst_type, const RegType& src_type1, const RegType& src_type2,
+                     RegType& dst_type, RegType& src_type1, RegType& src_type2,
                      bool check_boolean_op)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckBinaryOpWide(const Instruction* inst,
-                         const RegType& dst_type1, const RegType& dst_type2,
-                         const RegType& src_type1_1, const RegType& src_type1_2,
-                         const RegType& src_type2_1, const RegType& src_type2_2)
+                         RegType& dst_type1, RegType& dst_type2,
+                         RegType& src_type1_1, RegType& src_type1_2,
+                         RegType& src_type2_1, RegType& src_type2_2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckBinaryOpWideShift(const Instruction* inst,
-                              const RegType& long_lo_type, const RegType& long_hi_type,
-                              const RegType& int_type)
+                              RegType& long_lo_type, RegType& long_hi_type,
+                              RegType& int_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -225,20 +225,20 @@
    * are verified against vA/vB, then "dst_type" is stored into vA.
    */
   void CheckBinaryOp2addr(const Instruction* inst,
-                          const RegType& dst_type,
-                          const RegType& src_type1, const RegType& src_type2,
+                          RegType& dst_type,
+                          RegType& src_type1, RegType& src_type2,
                           bool check_boolean_op)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckBinaryOp2addrWide(const Instruction* inst,
-                              const RegType& dst_type1, const RegType& dst_type2,
-                              const RegType& src_type1_1, const RegType& src_type1_2,
-                              const RegType& src_type2_1, const RegType& src_type2_2)
+                              RegType& dst_type1, RegType& dst_type2,
+                              RegType& src_type1_1, RegType& src_type1_2,
+                              RegType& src_type2_1, RegType& src_type2_2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CheckBinaryOp2addrWideShift(const Instruction* inst,
-                                   const RegType& long_lo_type, const RegType& long_hi_type,
-                                   const RegType& int_type)
+                                   RegType& long_lo_type, RegType& long_hi_type,
+                                   RegType& int_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -248,7 +248,7 @@
    * If "check_boolean_op" is set, we use the constant value in vC.
    */
   void CheckLiteralOp(const Instruction* inst,
-                      const RegType& dst_type, const RegType& src_type,
+                      RegType& dst_type, RegType& src_type,
                       bool check_boolean_op, bool is_lit16)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/test/003-omnibus-opcodes/src/IntMath.java b/test/003-omnibus-opcodes/src/IntMath.java
index 2e2962a..ad540fd 100644
--- a/test/003-omnibus-opcodes/src/IntMath.java
+++ b/test/003-omnibus-opcodes/src/IntMath.java
@@ -335,8 +335,8 @@
                        special = (start+i) / 15;
                        break;
                }
+               Main.assertTrue(normal == special);
            }
-           Main.assertTrue(normal == special);
        }
     }
 
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 4909a4a..554712a 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -286,3 +286,8 @@
 
   return char_returns[c1];
 }
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_nativeIsAssignableFrom(JNIEnv* env, jclass,
+                                                                       jclass from, jclass to) {
+  return env->IsAssignableFrom(from, to);
+}
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index 11c80f5..ae133be 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -29,6 +29,7 @@
         testShortMethod();
         testBooleanMethod();
         testCharMethod();
+        testIsAssignableFromOnPrimitiveTypes();
     }
 
     private static native void testFindClassOnAttachedNativeThread();
@@ -151,4 +152,19 @@
         }
       }
     }
+
+    // http://b/16531674
+    private static void testIsAssignableFromOnPrimitiveTypes() {
+      if (!nativeIsAssignableFrom(int.class, Integer.TYPE)) {
+        System.out.println("IsAssignableFrom(int.class, Integer.TYPE) returned false, expected true");
+        throw new AssertionError();
+      }
+
+      if (!nativeIsAssignableFrom(Integer.TYPE, int.class)) {
+        System.out.println("IsAssignableFrom(Integer.TYPE, int.class) returned false, expected true");
+        throw new AssertionError();
+      }
+    }
+
+    static native boolean nativeIsAssignableFrom(Class<?> from, Class<?> to);
 }
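
The test above pins down http://b/16531674: IsAssignableFrom must treat a primitive class as assignable to itself (int.class and Integer.TYPE denote the same class object). For reference, a hypothetical native-side sketch that performs the same check without help from Java:

    #include <jni.h>

    // Fetches Integer.TYPE (the Class object for the primitive int) and
    // asks JNI whether it is assignable to itself; per the JNI spec this
    // must return JNI_TRUE. Error checks are omitted for brevity.
    jboolean IntClassAssignableToItself(JNIEnv* env) {
      jclass integer_class = env->FindClass("java/lang/Integer");
      jfieldID type_field =
          env->GetStaticFieldID(integer_class, "TYPE", "Ljava/lang/Class;");
      jclass int_class = static_cast<jclass>(
          env->GetStaticObjectField(integer_class, type_field));
      return env->IsAssignableFrom(int_class, int_class);
    }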
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index 9ecc0a0..56972ff 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -34,6 +34,11 @@
     test_Math_max_F();
     test_Math_min_D();
     test_Math_max_D();
+    test_Math_ceil();
+    test_Math_floor();
+    test_Math_rint();
+    test_Math_round_D();
+    test_Math_round_F();
     test_Short_reverseBytes();
     test_Integer_reverseBytes();
     test_Long_reverseBytes();
@@ -49,6 +54,11 @@
     test_StrictMath_max_F();
     test_StrictMath_min_D();
     test_StrictMath_max_D();
+    test_StrictMath_ceil();
+    test_StrictMath_floor();
+    test_StrictMath_rint();
+    test_StrictMath_round_D();
+    test_StrictMath_round_F();
     test_String_charAt();
     test_String_compareTo();
     test_String_indexOf();
@@ -376,6 +386,104 @@
     Assert.assertEquals(Math.max(Double.MIN_VALUE, Double.MAX_VALUE), Double.MAX_VALUE);
   }
 
+  public static void test_Math_ceil() {
+    Assert.assertEquals(Math.ceil(+0.0), +0.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-0.0), -0.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-0.9), -0.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-0.5), -0.0d, 0.0);
+    Assert.assertEquals(Math.ceil(0.0), +0.0d, 0.0);
+    Assert.assertEquals(Math.ceil(+2.0), +2.0d, 0.0);
+    Assert.assertEquals(Math.ceil(+2.1), +3.0d, 0.0);
+    Assert.assertEquals(Math.ceil(+2.5), +3.0d, 0.0);
+    Assert.assertEquals(Math.ceil(+2.9), +3.0d, 0.0);
+    Assert.assertEquals(Math.ceil(+3.0), +3.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-2.0), -2.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-2.1), -2.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-2.5), -2.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-2.9), -2.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-3.0), -3.0d, 0.0);
+    Assert.assertEquals(Math.ceil(Double.NaN), Double.NaN, 0.0);
+    Assert.assertEquals(Math.ceil(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+    Assert.assertEquals(Math.ceil(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+  }
+
+  public static void test_Math_floor() {
+    Assert.assertEquals(Math.floor(+0.0), +0.0d, 0.0);
+    Assert.assertEquals(Math.floor(-0.0), -0.0d, 0.0);
+    Assert.assertEquals(Math.floor(+2.0), +2.0d, 0.0);
+    Assert.assertEquals(Math.floor(+2.1), +2.0d, 0.0);
+    Assert.assertEquals(Math.floor(+2.5), +2.0d, 0.0);
+    Assert.assertEquals(Math.floor(+2.9), +2.0d, 0.0);
+    Assert.assertEquals(Math.floor(+3.0), +3.0d, 0.0);
+    Assert.assertEquals(Math.floor(-2.0), -2.0d, 0.0);
+    Assert.assertEquals(Math.floor(-2.1), -3.0d, 0.0);
+    Assert.assertEquals(Math.floor(-2.5), -3.0d, 0.0);
+    Assert.assertEquals(Math.floor(-2.9), -3.0d, 0.0);
+    Assert.assertEquals(Math.floor(-3.0), -3.0d, 0.0);
+    Assert.assertEquals(Math.floor(Double.NaN), Double.NaN, 0.0);
+    Assert.assertEquals(Math.floor(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+    Assert.assertEquals(Math.floor(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+  }
+
+  public static void test_Math_rint() {
+    Assert.assertEquals(Math.rint(+0.0), +0.0d, 0.0);
+    Assert.assertEquals(Math.rint(-0.0), -0.0d, 0.0);
+    Assert.assertEquals(Math.rint(+2.0), +2.0d, 0.0);
+    Assert.assertEquals(Math.rint(+2.1), +2.0d, 0.0);
+    Assert.assertEquals(Math.rint(+2.5), +2.0d, 0.0);
+    Assert.assertEquals(Math.rint(+2.9), +3.0d, 0.0);
+    Assert.assertEquals(Math.rint(+3.0), +3.0d, 0.0);
+    Assert.assertEquals(Math.rint(-2.0), -2.0d, 0.0);
+    Assert.assertEquals(Math.rint(-2.1), -2.0d, 0.0);
+    Assert.assertEquals(Math.rint(-2.5), -2.0d, 0.0);
+    Assert.assertEquals(Math.rint(-2.9), -3.0d, 0.0);
+    Assert.assertEquals(Math.rint(-3.0), -3.0d, 0.0);
+    Assert.assertEquals(Math.rint(Double.NaN), Double.NaN, 0.0);
+    Assert.assertEquals(Math.rint(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+    Assert.assertEquals(Math.rint(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+  }
+
+  public static void test_Math_round_D() {
+    Assert.assertEquals(Math.round(+0.0d), (long)+0.0);
+    Assert.assertEquals(Math.round(-0.0d), (long)+0.0);
+    Assert.assertEquals(Math.round(2.0d), 2L);
+    Assert.assertEquals(Math.round(2.1d), 2L);
+    Assert.assertEquals(Math.round(2.5d), 3L);
+    Assert.assertEquals(Math.round(2.9d), 3L);
+    Assert.assertEquals(Math.round(3.0d), 3L);
+    Assert.assertEquals(Math.round(-2.0d), -2L);
+    Assert.assertEquals(Math.round(-2.1d), -2L);
+    Assert.assertEquals(Math.round(-2.5d), -2L);
+    Assert.assertEquals(Math.round(-2.9d), -3L);
+    Assert.assertEquals(Math.round(-3.0d), -3L);
+    Assert.assertEquals(Math.round(0.49999999999999994d), 1L);
+    Assert.assertEquals(Math.round(Double.NaN), (long)+0.0d);
+    Assert.assertEquals(Math.round(Long.MAX_VALUE + 1.0d), Long.MAX_VALUE);
+    Assert.assertEquals(Math.round(Long.MIN_VALUE - 1.0d), Long.MIN_VALUE);
+    Assert.assertEquals(Math.round(Double.POSITIVE_INFINITY), Long.MAX_VALUE);
+    Assert.assertEquals(Math.round(Double.NEGATIVE_INFINITY), Long.MIN_VALUE);
+  }
+
+  public static void test_Math_round_F() {
+    Assert.assertEquals(Math.round(+0.0f), (int)+0.0);
+    Assert.assertEquals(Math.round(-0.0f), (int)+0.0);
+    Assert.assertEquals(Math.round(2.0f), 2);
+    Assert.assertEquals(Math.round(2.1f), 2);
+    Assert.assertEquals(Math.round(2.5f), 3);
+    Assert.assertEquals(Math.round(2.9f), 3);
+    Assert.assertEquals(Math.round(3.0f), 3);
+    Assert.assertEquals(Math.round(-2.0f), -2);
+    Assert.assertEquals(Math.round(-2.1f), -2);
+    Assert.assertEquals(Math.round(-2.5f), -2);
+    Assert.assertEquals(Math.round(-2.9f), -3);
+    Assert.assertEquals(Math.round(-3.0f), -3);
+    Assert.assertEquals(Math.round(Float.NaN), (int)+0.0f);
+    Assert.assertEquals(Math.round(Integer.MAX_VALUE + 1.0f), Integer.MAX_VALUE);
+    Assert.assertEquals(Math.round(Integer.MIN_VALUE - 1.0f), Integer.MIN_VALUE);
+    Assert.assertEquals(Math.round(Float.POSITIVE_INFINITY), Integer.MAX_VALUE);
+    Assert.assertEquals(Math.round(Float.NEGATIVE_INFINITY), Integer.MIN_VALUE);
+  }
+
   public static void test_StrictMath_abs_I() {
     Assert.assertEquals(StrictMath.abs(0), 0);
     Assert.assertEquals(StrictMath.abs(123), 123);
@@ -487,6 +595,104 @@
     Assert.assertEquals(StrictMath.max(Double.MIN_VALUE, Double.MAX_VALUE), Double.MAX_VALUE);
   }
 
+  public static void test_StrictMath_ceil() {
+    Assert.assertEquals(StrictMath.ceil(+0.0), +0.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-0.0), -0.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-0.9), -0.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-0.5), -0.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(0.0), +0.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(+2.0), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(+2.1), +3.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(+2.5), +3.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(+2.9), +3.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(+3.0), +3.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-2.0), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-2.1), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-2.5), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-2.9), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-3.0), -3.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(Double.NaN), Double.NaN, 0.0);
+    Assert.assertEquals(StrictMath.ceil(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+    Assert.assertEquals(StrictMath.ceil(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+  }
+
+  public static void test_StrictMath_floor() {
+    Assert.assertEquals(StrictMath.floor(+0.0), +0.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(-0.0), -0.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(+2.0), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(+2.1), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(+2.5), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(+2.9), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(+3.0), +3.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(-2.0), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(-2.1), -3.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(-2.5), -3.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(-2.9), -3.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(-3.0), -3.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(Double.NaN), Double.NaN, 0.0);
+    Assert.assertEquals(StrictMath.floor(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+    Assert.assertEquals(StrictMath.floor(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+  }
+
+  public static void test_StrictMath_rint() {
+    Assert.assertEquals(StrictMath.rint(+0.0), +0.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(-0.0), -0.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(+2.0), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(+2.1), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(+2.5), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(+2.9), +3.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(+3.0), +3.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(-2.0), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(-2.1), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(-2.5), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(-2.9), -3.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(-3.0), -3.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(Double.NaN), Double.NaN, 0.0);
+    Assert.assertEquals(StrictMath.rint(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+    Assert.assertEquals(StrictMath.rint(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+  }
+
+  public static void test_StrictMath_round_D() {
+    Assert.assertEquals(StrictMath.round(+0.0d), (long)+0.0);
+    Assert.assertEquals(StrictMath.round(-0.0d), (long)+0.0);
+    Assert.assertEquals(StrictMath.round(2.0d), 2L);
+    Assert.assertEquals(StrictMath.round(2.1d), 2L);
+    Assert.assertEquals(StrictMath.round(2.5d), 3L);
+    Assert.assertEquals(StrictMath.round(2.9d), 3L);
+    Assert.assertEquals(StrictMath.round(3.0d), 3L);
+    Assert.assertEquals(StrictMath.round(-2.0d), -2L);
+    Assert.assertEquals(StrictMath.round(-2.1d), -2L);
+    Assert.assertEquals(StrictMath.round(-2.5d), -2L);
+    Assert.assertEquals(StrictMath.round(-2.9d), -3L);
+    Assert.assertEquals(StrictMath.round(-3.0d), -3L);
+    Assert.assertEquals(StrictMath.round(0.49999999999999994d), 1L);
+    Assert.assertEquals(StrictMath.round(Double.NaN), (long)+0.0d);
+    Assert.assertEquals(StrictMath.round(Long.MAX_VALUE + 1.0d), Long.MAX_VALUE);
+    Assert.assertEquals(StrictMath.round(Long.MIN_VALUE - 1.0d), Long.MIN_VALUE);
+    Assert.assertEquals(StrictMath.round(Double.POSITIVE_INFINITY), Long.MAX_VALUE);
+    Assert.assertEquals(StrictMath.round(Double.NEGATIVE_INFINITY), Long.MIN_VALUE);
+  }
+
+  public static void test_StrictMath_round_F() {
+    Assert.assertEquals(StrictMath.round(+0.0f), (int)+0.0);
+    Assert.assertEquals(StrictMath.round(-0.0f), (int)+0.0);
+    Assert.assertEquals(StrictMath.round(2.0f), 2);
+    Assert.assertEquals(StrictMath.round(2.1f), 2);
+    Assert.assertEquals(StrictMath.round(2.5f), 3);
+    Assert.assertEquals(StrictMath.round(2.9f), 3);
+    Assert.assertEquals(StrictMath.round(3.0f), 3);
+    Assert.assertEquals(StrictMath.round(-2.0f), -2);
+    Assert.assertEquals(StrictMath.round(-2.1f), -2);
+    Assert.assertEquals(StrictMath.round(-2.5f), -2);
+    Assert.assertEquals(StrictMath.round(-2.9f), -3);
+    Assert.assertEquals(StrictMath.round(-3.0f), -3);
+    Assert.assertEquals(StrictMath.round(Float.NaN), (int)+0.0f);
+    Assert.assertEquals(StrictMath.round(Integer.MAX_VALUE + 1.0f), Integer.MAX_VALUE);
+    Assert.assertEquals(StrictMath.round(Integer.MIN_VALUE - 1.0f), Integer.MIN_VALUE);
+    Assert.assertEquals(StrictMath.round(Float.POSITIVE_INFINITY), Integer.MAX_VALUE);
+    Assert.assertEquals(StrictMath.round(Float.NEGATIVE_INFINITY), Integer.MIN_VALUE);
+  }
+
   public static void test_Float_floatToRawIntBits() {
     Assert.assertEquals(Float.floatToRawIntBits(-1.0f), 0xbf800000);
     Assert.assertEquals(Float.floatToRawIntBits(0.0f), 0);
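
One expectation above deserves a note: Math.round(0.49999999999999994d) is asserted to be 1, matching the classic round(x) == floor(x + 0.5) definition. 0.49999999999999994 is the largest double strictly below 0.5, but x + 0.5 is not representable in binary64 and rounds to exactly 1.0, so floor returns 1. A standalone sketch of the effect (later JDK releases changed Math.round to return 0 for this input):

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = 0.49999999999999994;  // largest double strictly below 0.5
      // The exact sum 1 - 2^-54 lies halfway between representable values
      // and rounds (half to even) to 1.0, so floor(x + 0.5) yields 1.
      std::printf("round(%.17g) via floor(x + 0.5) = %g\n", x, std::floor(x + 0.5));
      return 0;
    }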
diff --git a/test/115-native-bridge/expected.txt b/test/115-native-bridge/expected.txt
index f852620..5b41606 100644
--- a/test/115-native-bridge/expected.txt
+++ b/test/115-native-bridge/expected.txt
@@ -1,13 +1,55 @@
 Ready for native bridge tests.
 Native bridge initialized.
 Checking for support.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
+Getting trampoline for JNI_OnLoad with shorty (null).
+Test ART callbacks: total number of JNI methods is 9.
+    name:booleanMethod, signature:(ZZZZZZZZZZ)Z, shorty:ZZZZZZZZZZZ.
+    name:byteMethod, signature:(BBBBBBBBBB)B, shorty:BBBBBBBBBBB.
+    name:charMethod, signature:(CCCCCCCCCC)C, shorty:CCCCCCCCCCC.
+    name:shortMethod, signature:(SSSSSSSSSS)S, shorty:SSSSSSSSSSS.
+    name:testCallStaticVoidMethodOnSubClassNative, signature:()V, shorty:V.
+    name:testFindClassOnAttachedNativeThread, signature:()V, shorty:V.
+    name:testFindFieldOnAttachedNativeThreadNative, signature:()V, shorty:V.
+    name:testGetMirandaMethodNative, signature:()Ljava/lang/reflect/Method;, shorty:L.
+    name:testZeroLengthByteBuffers, signature:()V, shorty:V.
+trampoline_JNI_OnLoad called!
+Getting trampoline for Java_Main_testFindClassOnAttachedNativeThread with shorty V.
+trampoline_Java_Main_testFindClassOnAttachedNativeThread called!
+Getting trampoline for Java_Main_testFindFieldOnAttachedNativeThreadNative with shorty V.
+trampoline_Java_Main_testFindFieldOnAttachedNativeThreadNative called!
+Getting trampoline for Java_Main_testCallStaticVoidMethodOnSubClassNative with shorty V.
+trampoline_Java_Main_testCallStaticVoidMethodOnSubClassNative called!
+Getting trampoline for Java_Main_testGetMirandaMethodNative with shorty L.
+trampoline_Java_Main_testGetMirandaMethodNative called!
+Getting trampoline for Java_Main_testZeroLengthByteBuffers with shorty V.
+trampoline_Java_Main_testZeroLengthByteBuffers called!
+Getting trampoline for Java_Main_byteMethod with shorty BBBBBBBBBBB.
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+Getting trampoline for Java_Main_shortMethod with shorty SSSSSSSSSSS.
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+Getting trampoline for Java_Main_booleanMethod with shorty ZZZZZZZZZZZ.
+trampoline_Java_Main_booleanMethod called!
+trampoline_Java_Main_booleanMethod called!
+Getting trampoline for Java_Main_charMethod with shorty CCCCCCCCCCC.
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index bd3ae13..82211a5 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -44,13 +44,192 @@
   bool (*isSupported)(const char* libpath);
 };
 
+struct NativeBridgeMethod {
+  const char* name;
+  const char* signature;
+  bool static_method;
+  void* fnPtr;
+  void* trampoline;
+};
 
+static NativeBridgeMethod* find_native_bridge_method(const char *name);
+static NativeBridgeArtCallbacks* gNativeBridgeArtCallbacks;
 
-static std::vector<void*> symbols;
+static jint trampoline_JNI_OnLoad(JavaVM* vm, void* reserved) {
+  JNIEnv* env = nullptr;
+  typedef jint (*FnPtr_t)(JavaVM*, void*);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("JNI_OnLoad")->fnPtr);
+
+  vm->GetEnv(reinterpret_cast<void **>(&env), JNI_VERSION_1_6);
+  if (env == nullptr) {
+    return 0;
+  }
+
+  jclass klass = env->FindClass("Main");
+  if (klass != nullptr) {
+    int i, count1, count2;
+    count1 = gNativeBridgeArtCallbacks->getNativeMethodCount(env, klass);
+    std::unique_ptr<JNINativeMethod[]> methods(new JNINativeMethod[count1]);
+    if (methods == nullptr) {
+      return 0;
+    }
+    count2 = gNativeBridgeArtCallbacks->getNativeMethods(env, klass, methods.get(), count1);
+    if (count1 == count2) {
+      printf("Test ART callbacks: all JNI function number is %d.\n", count1);
+    }
+
+    for (i = 0; i < count1; i++) {
+      NativeBridgeMethod* nb_method = find_native_bridge_method(methods[i].name);
+      if (nb_method != nullptr) {
+        jmethodID mid = nullptr;
+        if (nb_method->static_method) {
+          mid = env->GetStaticMethodID(klass, methods[i].name, nb_method->signature);
+        } else {
+          mid = env->GetMethodID(klass, methods[i].name, nb_method->signature);
+        }
+        if (mid != nullptr) {
+          const char* shorty = gNativeBridgeArtCallbacks->getMethodShorty(env, mid);
+          if (strcmp(shorty, methods[i].signature) == 0) {
+            printf("    name:%s, signature:%s, shorty:%s.\n",
+                   methods[i].name, nb_method->signature, shorty);
+          }
+        }
+      }
+    }
+    // No explicit cleanup: the unique_ptr frees the methods array on scope exit.
+  }
+
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(vm, reserved);
+}
+
+static void trampoline_Java_Main_testFindClassOnAttachedNativeThread(JNIEnv* env,
+                                                                     jclass klass) {
+  typedef void (*FnPtr_t)(JNIEnv*, jclass);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+    (find_native_bridge_method("testFindClassOnAttachedNativeThread")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass);
+}
+
+static void trampoline_Java_Main_testFindFieldOnAttachedNativeThreadNative(JNIEnv* env,
+                                                                           jclass klass) {
+  typedef void (*FnPtr_t)(JNIEnv*, jclass);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+    (find_native_bridge_method("testFindFieldOnAttachedNativeThreadNative")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass);
+}
+
+static void trampoline_Java_Main_testCallStaticVoidMethodOnSubClassNative(JNIEnv* env,
+                                                                          jclass klass) {
+  typedef void (*FnPtr_t)(JNIEnv*, jclass);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+    (find_native_bridge_method("testCallStaticVoidMethodOnSubClassNative")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass);
+}
+
+static jobject trampoline_Java_Main_testGetMirandaMethodNative(JNIEnv* env, jclass klass) {
+  typedef jobject (*FnPtr_t)(JNIEnv*, jclass);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+    (find_native_bridge_method("testGetMirandaMethodNative")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass);
+}
+
+static void trampoline_Java_Main_testZeroLengthByteBuffers(JNIEnv* env, jclass klass) {
+  typedef void (*FnPtr_t)(JNIEnv*, jclass);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+    (find_native_bridge_method("testZeroLengthByteBuffers")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass);
+}
+
+static jbyte trampoline_Java_Main_byteMethod(JNIEnv* env, jclass klass, jbyte b1, jbyte b2,
+                                             jbyte b3, jbyte b4, jbyte b5, jbyte b6,
+                                             jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
+  typedef jbyte (*FnPtr_t)(JNIEnv*, jclass, jbyte, jbyte, jbyte, jbyte, jbyte,
+                           jbyte, jbyte, jbyte, jbyte, jbyte);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("byteMethod")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10);
+}
+
+static jshort trampoline_Java_Main_shortMethod(JNIEnv* env, jclass klass, jshort s1, jshort s2,
+                                               jshort s3, jshort s4, jshort s5, jshort s6,
+                                               jshort s7, jshort s8, jshort s9, jshort s10) {
+  typedef jshort (*FnPtr_t)(JNIEnv*, jclass, jshort, jshort, jshort, jshort, jshort,
+                            jshort, jshort, jshort, jshort, jshort);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("shortMethod")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10);
+}
+
+static jboolean trampoline_Java_Main_booleanMethod(JNIEnv* env, jclass klass, jboolean b1,
+                                                   jboolean b2, jboolean b3, jboolean b4,
+                                                   jboolean b5, jboolean b6, jboolean b7,
+                                                   jboolean b8, jboolean b9, jboolean b10) {
+  typedef jboolean (*FnPtr_t)(JNIEnv*, jclass, jboolean, jboolean, jboolean, jboolean, jboolean,
+                              jboolean, jboolean, jboolean, jboolean, jboolean);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("booleanMethod")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10);
+}
+
+static jchar trampoline_Java_Main_charMethod(JNIEnv* env, jclass klass, jchar c1, jchar c2,
+                                             jchar c3, jchar c4, jchar c5, jchar c6,
+                                             jchar c7, jchar c8, jchar c9, jchar c10) {
+  typedef jchar (*FnPtr_t)(JNIEnv*, jclass, jchar, jchar, jchar, jchar, jchar,
+                           jchar, jchar, jchar, jchar, jchar);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("charMethod")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10);
+}
+
+NativeBridgeMethod gNativeBridgeMethods[] = {
+  { "JNI_OnLoad", "", true, nullptr,
+    reinterpret_cast<void*>(trampoline_JNI_OnLoad) },
+  { "booleanMethod", "(ZZZZZZZZZZ)Z", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_booleanMethod) },
+  { "byteMethod", "(BBBBBBBBBB)B", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_byteMethod) },
+  { "charMethod", "(CCCCCCCCCC)C", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_charMethod) },
+  { "shortMethod", "(SSSSSSSSSS)S", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_shortMethod) },
+  { "testCallStaticVoidMethodOnSubClassNative", "()V", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_testCallStaticVoidMethodOnSubClassNative) },
+  { "testFindClassOnAttachedNativeThread", "()V", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_testFindClassOnAttachedNativeThread) },
+  { "testFindFieldOnAttachedNativeThreadNative", "()V", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_testFindFieldOnAttachedNativeThreadNative) },
+  { "testGetMirandaMethodNative", "()Ljava/lang/reflect/Method;", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_testGetMirandaMethodNative) },
+  { "testZeroLengthByteBuffers", "()V", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_testZeroLengthByteBuffers) },
+};
+
+static NativeBridgeMethod* find_native_bridge_method(const char *name) {
+  const char* pname = name;
+  if (strncmp(name, "Java_Main_", 10) == 0) {
+    pname += 10;
+  }
+
+  for (size_t i = 0; i < sizeof(gNativeBridgeMethods) / sizeof(gNativeBridgeMethods[0]); i++) {
+    if (strcmp(pname, gNativeBridgeMethods[i].name) == 0) {
+      return &gNativeBridgeMethods[i];
+    }
+  }
+  return nullptr;
+}
 
 // NativeBridgeCallbacks implementations
 extern "C" bool native_bridge_initialize(NativeBridgeArtCallbacks* art_cbs) {
-  printf("Native bridge initialized.\n");
+  if (art_cbs != nullptr) {
+    gNativeBridgeArtCallbacks = art_cbs;
+    printf("Native bridge initialized.\n");
+  }
   return true;
 }
 
@@ -80,17 +259,17 @@
 
 extern "C" void* native_bridge_getTrampoline(void* handle, const char* name, const char* shorty,
                                              uint32_t len) {
-  printf("Getting trampoline.\n");
+  printf("Getting trampoline for %s with shorty %s.\n", name, shorty);
 
   // The name here is actually the JNI name, so we can directly do the lookup.
   void* sym = dlsym(handle, name);
-  if (sym != nullptr) {
-    symbols.push_back(sym);
-  }
+  NativeBridgeMethod* method = find_native_bridge_method(name);
+  if (method == nullptr) {
+    return nullptr;
+  }
+  method->fnPtr = sym;
 
-  // As libarttest is the same arch as the host, we can actually directly use the code and do not
-  // need to create a trampoline. :-)
-  return sym;
+  return method->trampoline;
 }
 
 extern "C" bool native_bridge_isSupported(const char* libpath) {
@@ -109,6 +287,3 @@
   .getTrampoline = &native_bridge_getTrampoline,
   .isSupported = &native_bridge_isSupported
 };
-
-
-