ART: Rework quick entrypoint code in Mir2Lir, cleanup

To reduce the complexity of calling trampolines in generic code,
introduce an enumeration for entrypoints. Introduce a header that lists
the entrypoint enum and exposes a templatized method that translates an
enum value to the corresponding thread offset value.

Call helpers are rewritten to have an enum parameter instead of the
thread offset. Also rewrite LoadHelper and GenConversionCall this way.
It is now LoadHelper's duty to select the right thread offset size.

Introduce an InvokeTrampoline virtual method in Mir2Lir. This allows us
to further simplify the call helpers, as well as to make OpThreadMem
specific to X86 only (removed from Mir2Lir).

Make GenInlinedCharAt virtual, move a copy to the X86 backend, and simplify
both copies. Remove LoadBaseIndexedDisp and OpRegMem from Mir2Lir, as they
are now specific to X86 only.

Remove StoreBaseIndexedDisp from Mir2Lir, as it was only ever used in the
X86 backend.

Remove OpTlsCmp from Mir2Lir, as it was only ever used in the X86 backend.

Remove OpLea from Mir2Lir, as it was only ever defined in the X86 backend.

Remove GenImmedCheck from Mir2Lir as it was neither used nor implemented.

Change-Id: If0a6182288c5d57653e3979bf547840a4c47626e
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 69adb35..346fbb8 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -425,10 +425,6 @@
 
 std::ostream& operator<<(std::ostream& os, const X86ConditionCode& kind);
 
-enum ThrowKind {
-  kThrowNoSuchMethod,
-};
-
 enum DividePattern {
   DivideNone,
   Divide3,
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index fa252a1..9652192 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -31,22 +31,17 @@
                             RegLocation rl_dest, int lit);
     bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
     LIR* CheckSuspendUsingLoad() OVERRIDE;
-    RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
-    RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
+    RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
     LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                       OpSize size, VolatileKind is_volatile) OVERRIDE;
     LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
                          OpSize size) OVERRIDE;
-    LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                             RegStorage r_dest, OpSize size) OVERRIDE;
     LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
     LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
     LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                        OpSize size, VolatileKind is_volatile) OVERRIDE;
     LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
                           OpSize size) OVERRIDE;
-    LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                              RegStorage r_src, OpSize size) OVERRIDE;
     void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
 
     // Required for target - register utilities.
@@ -168,7 +163,6 @@
     void OpRegCopy(RegStorage r_dest, RegStorage r_src);
     LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
     LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
-    LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
     LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
     LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
     LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
@@ -176,14 +170,9 @@
     LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
     LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
     LIR* OpTestSuspend(LIR* target);
-    LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) OVERRIDE;
-    LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) OVERRIDE;
     LIR* OpVldm(RegStorage r_base, int count);
     LIR* OpVstm(RegStorage r_base, int count);
-    void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
     void OpRegCopyWide(RegStorage dest, RegStorage src);
-    void OpTlsCmp(ThreadOffset<4> offset, int val) OVERRIDE;
-    void OpTlsCmp(ThreadOffset<8> offset, int val) OVERRIDE;
 
     LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
     LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
@@ -208,6 +197,8 @@
       return false;  // Wide FPRs are formed by pairing.
     }
 
+    LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+
   private:
     void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, int64_t val,
                                   ConditionCode ccode);
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index dcb8857..2ad11da 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -49,8 +49,7 @@
     case Instruction::REM_FLOAT_2ADDR:
     case Instruction::REM_FLOAT:
       FlushAllRegs();   // Send everything to home location
-      CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pFmodf), rl_src1, rl_src2,
-                                              false);
+      CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
       rl_result = GetReturn(kFPReg);
       StoreValue(rl_dest, rl_result);
       return;
@@ -92,8 +91,7 @@
     case Instruction::REM_DOUBLE_2ADDR:
     case Instruction::REM_DOUBLE:
       FlushAllRegs();   // Send everything to home location
-      CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pFmod), rl_src1, rl_src2,
-                                              false);
+      CallRuntimeHelperRegLocationRegLocation(kQuickFmod, rl_src1, rl_src2, false);
       rl_result = GetReturnWide(kFPReg);
       StoreValueWide(rl_dest, rl_result);
       return;
@@ -160,7 +158,7 @@
       return;
     }
     case Instruction::FLOAT_TO_LONG:
-      GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pF2l), rl_dest, rl_src);
+      GenConversionCall(kQuickF2l, rl_dest, rl_src);
       return;
     case Instruction::LONG_TO_FLOAT: {
       rl_src = LoadValueWide(rl_src, kFPReg);
@@ -190,7 +188,7 @@
       return;
     }
     case Instruction::DOUBLE_TO_LONG:
-      GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pD2l), rl_dest, rl_src);
+      GenConversionCall(kQuickD2l, rl_dest, rl_src);
       return;
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index a85b740..dd14ed9 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -784,18 +784,6 @@
   return true;
 }
 
-void ArmMir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
-  LOG(FATAL) << "Unexpected use of OpLea for Arm";
-}
-
-void ArmMir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
-  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
-}
-
-void ArmMir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
-  UNIMPLEMENTED(FATAL) << "Should not be called.";
-}
-
 // Generate a CAS with memory_order_seq_cst semantics.
 bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
   DCHECK_EQ(cu_->instruction_set, kThumb2);
@@ -1097,9 +1085,8 @@
      */
     RegLocation rl_result;
     if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
-      ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmul);
       FlushAllRegs();
-      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
+      CallRuntimeHelperRegLocationRegLocation(kQuickLmul, rl_src1, rl_src2, false);
       rl_result = GetReturnWide(kCoreReg);
       StoreValueWide(rl_dest, rl_result);
       return;
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 8cc7596..0509ad3 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -720,16 +720,11 @@
   FreeTemp(rs_r3);
 }
 
-RegStorage ArmMir2Lir::LoadHelper(ThreadOffset<4> offset) {
-  LoadWordDisp(rs_rARM_SELF, offset.Int32Value(), rs_rARM_LR);
+RegStorage ArmMir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
+  LoadWordDisp(rs_rARM_SELF, GetThreadOffset<4>(trampoline).Int32Value(), rs_rARM_LR);
   return rs_rARM_LR;
 }
 
-RegStorage ArmMir2Lir::LoadHelper(ThreadOffset<8> offset) {
-  UNIMPLEMENTED(FATAL) << "Should not be called.";
-  return RegStorage::InvalidReg();
-}
-
 LIR* ArmMir2Lir::CheckSuspendUsingLoad() {
   RegStorage tmp = rs_r0;
   Load32Disp(rs_rARM_SELF, Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 9bb9dda..cf21da7 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -1160,36 +1160,13 @@
   return res;
 }
 
-LIR* ArmMir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
-  LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
-  return NULL;
-}
-
-LIR* ArmMir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
-  UNIMPLEMENTED(FATAL) << "Should not be called.";
-  return nullptr;
-}
-
 LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
   LOG(FATAL) << "Unexpected use of OpMem for Arm";
   return NULL;
 }
 
-LIR* ArmMir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                      int displacement, RegStorage r_src, OpSize size) {
-  LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
-  return NULL;
-}
-
-LIR* ArmMir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
-  LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
-  return NULL;
-}
-
-LIR* ArmMir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                     int displacement, RegStorage r_dest, OpSize size) {
-  LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
-  return NULL;
+LIR* ArmMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+  return OpReg(op, r_tgt);
 }
 
 }  // namespace art
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 8d15326..ac36519 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -73,8 +73,7 @@
                             RegLocation rl_src, RegLocation rl_dest, int64_t lit);
     bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
     LIR* CheckSuspendUsingLoad() OVERRIDE;
-    RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
-    RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
+    RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
     LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                       OpSize size, VolatileKind is_volatile) OVERRIDE;
     LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
@@ -84,8 +83,6 @@
                          OpSize size) OVERRIDE;
     LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale)
         OVERRIDE;
-    LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                             RegStorage r_dest, OpSize size) OVERRIDE;
     LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
     LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
     LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
@@ -96,8 +93,6 @@
                           OpSize size) OVERRIDE;
     LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale)
         OVERRIDE;
-    LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                              RegStorage r_src, OpSize size) OVERRIDE;
     void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
     LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                            int offset, int check_value, LIR* target, LIR** compare) OVERRIDE;
@@ -246,7 +241,6 @@
     LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
     LIR* OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value);
     LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
-    LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
     LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
     LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
     LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
@@ -255,14 +249,9 @@
     LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
     LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
     LIR* OpTestSuspend(LIR* target);
-    LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) OVERRIDE;
-    LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) OVERRIDE;
     LIR* OpVldm(RegStorage r_base, int count);
     LIR* OpVstm(RegStorage r_base, int count);
-    void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
     void OpRegCopyWide(RegStorage dest, RegStorage src);
-    void OpTlsCmp(ThreadOffset<4> offset, int val) OVERRIDE;
-    void OpTlsCmp(ThreadOffset<8> offset, int val) OVERRIDE;
 
     LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
     LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
@@ -309,6 +298,8 @@
       return true;  // 64b architecture.
     }
 
+    LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+
   private:
     /**
      * @brief Given register xNN (dNN), returns register wNN (sNN).
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index 175cef1..ed13c04 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -45,8 +45,7 @@
     case Instruction::REM_FLOAT_2ADDR:
     case Instruction::REM_FLOAT:
       FlushAllRegs();   // Send everything to home location
-      CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pFmodf), rl_src1, rl_src2,
-                                              false);
+      CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
       rl_result = GetReturn(kFPReg);
       StoreValue(rl_dest, rl_result);
       return;
@@ -89,12 +88,11 @@
     case Instruction::REM_DOUBLE:
       FlushAllRegs();   // Send everything to home location
       {
-        ThreadOffset<8> helper_offset = QUICK_ENTRYPOINT_OFFSET(8, pFmod);
-        RegStorage r_tgt = CallHelperSetup(helper_offset);
+        RegStorage r_tgt = CallHelperSetup(kQuickFmod);
         LoadValueDirectWideFixed(rl_src1, rs_d0);
         LoadValueDirectWideFixed(rl_src2, rs_d1);
         ClobberCallerSave();
-        CallHelper(r_tgt, helper_offset, false);
+        CallHelper(r_tgt, kQuickFmod, false);
       }
       rl_result = GetReturnWide(kFPReg);
       StoreValueWide(rl_dest, rl_result);
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index aed8de8..f9f85f4 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -701,18 +701,6 @@
   return true;
 }
 
-void Arm64Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
-  LOG(FATAL) << "Unexpected use of OpLea for Arm64";
-}
-
-void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
-  UNIMPLEMENTED(FATAL) << "Should not be used.";
-}
-
-void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
-  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm64";
-}
-
 bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
   DCHECK_EQ(cu_->instruction_set, kArm64);
   // Unused - RegLocation rl_src_unsafe = info->args[0];
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index f1dc77a..dec81cb 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -758,15 +758,10 @@
   FreeTemp(rs_f7);
 }
 
-RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
-  UNIMPLEMENTED(FATAL) << "Should not be called.";
-  return RegStorage::InvalidReg();
-}
-
-RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
+RegStorage Arm64Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
   // TODO(Arm64): use LoadWordDisp instead.
   //   e.g. LoadWordDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR);
-  LoadBaseDisp(rs_xSELF, offset.Int32Value(), rs_xLR, k64, kNotVolatile);
+  LoadBaseDisp(rs_xSELF, GetThreadOffset<8>(trampoline).Int32Value(), rs_xLR, k64, kNotVolatile);
   return rs_xLR;
 }
 
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index fdebb92..f6c140f 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -1262,36 +1262,13 @@
   return NULL;
 }
 
-LIR* Arm64Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
-  UNIMPLEMENTED(FATAL) << "Should not be used.";
-  return nullptr;
-}
-
-LIR* Arm64Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
-  LOG(FATAL) << "Unexpected use of OpThreadMem for Arm64";
-  return NULL;
-}
-
 LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
   LOG(FATAL) << "Unexpected use of OpMem for Arm64";
   return NULL;
 }
 
-LIR* Arm64Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                        int displacement, RegStorage r_src, OpSize size) {
-  LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm64";
-  return NULL;
-}
-
-LIR* Arm64Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
-  LOG(FATAL) << "Unexpected use of OpRegMem for Arm64";
-  return NULL;
-}
-
-LIR* Arm64Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                       int displacement, RegStorage r_dest, OpSize size) {
-  LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm64";
-  return NULL;
+LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+  return OpReg(op, r_tgt);
 }
 
 }  // namespace art
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index b94e816..f0e4d9c 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -73,11 +73,7 @@
       m2l_->ResetRegPool();
       m2l_->ResetDefTracking();
       GenerateTargetLabel(kPseudoThrowTarget);
-      if (m2l_->cu_->target64) {
-        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowDivZero), true);
-      } else {
-        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowDivZero), true);
-      }
+      m2l_->CallRuntimeHelper(kQuickThrowDivZero, true);
     }
   };
 
@@ -96,13 +92,7 @@
       m2l_->ResetRegPool();
       m2l_->ResetDefTracking();
       GenerateTargetLabel(kPseudoThrowTarget);
-      if (m2l_->cu_->target64) {
-        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
-                                      index_, length_, true);
-      } else {
-        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
-                                      index_, length_, true);
-      }
+      m2l_->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, index_, length_, true);
     }
 
    private:
@@ -132,13 +122,7 @@
 
       m2l_->OpRegCopy(arg1_32, length_);
       m2l_->LoadConstant(arg0_32, index_);
-      if (m2l_->cu_->target64) {
-        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
-                                      arg0_32, arg1_32, true);
-      } else {
-        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
-                                      arg0_32, arg1_32, true);
-      }
+      m2l_->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, arg0_32, arg1_32, true);
     }
 
    private:
@@ -161,11 +145,7 @@
       m2l_->ResetRegPool();
       m2l_->ResetDefTracking();
       GenerateTargetLabel(kPseudoThrowTarget);
-      if (m2l_->cu_->target64) {
-        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowNullPointer), true);
-      } else {
-        m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer), true);
-      }
+      m2l_->CallRuntimeHelper(kQuickThrowNullPointer, true);
     }
   };
 
@@ -360,16 +340,17 @@
   StoreValue(rl_dest, rl_result);
 }
 
-template <size_t pointer_size>
-static void GenNewArrayImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu,
-                            uint32_t type_idx, RegLocation rl_dest,
-                            RegLocation rl_src) {
-  mir_to_lir->FlushAllRegs();  /* Everything to home location */
-  ThreadOffset<pointer_size> func_offset(-1);
-  const DexFile* dex_file = cu->dex_file;
-  CompilerDriver* driver = cu->compiler_driver;
-  if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx, *dex_file,
-                                                      type_idx)) {
+/*
+ * Let helper function take care of everything.  Will call
+ * Array::AllocFromCode(type_idx, method, count);
+ * Note: AllocFromCode will handle checks for errNegativeArraySize.
+ */
+void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
+                          RegLocation rl_src) {
+  FlushAllRegs();  /* Everything to home location */
+  const DexFile* dex_file = cu_->dex_file;
+  CompilerDriver* driver = cu_->compiler_driver;
+  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file, type_idx)) {
     bool is_type_initialized;  // Ignored as an array does not have an initializer.
     bool use_direct_type_ptr;
     uintptr_t direct_type_ptr;
@@ -379,55 +360,22 @@
                                    &direct_type_ptr, &is_finalizable)) {
       // The fast path.
       if (!use_direct_type_ptr) {
-        mir_to_lir->LoadClassType(type_idx, kArg0);
-        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved);
-        mir_to_lir->CallRuntimeHelperRegMethodRegLocation(func_offset,
-                                                          mir_to_lir->TargetReg(kArg0, kNotWide),
-                                                          rl_src, true);
+        LoadClassType(type_idx, kArg0);
+        CallRuntimeHelperRegMethodRegLocation(kQuickAllocArrayResolved, TargetReg(kArg0, kNotWide),
+                                              rl_src, true);
       } else {
         // Use the direct pointer.
-        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved);
-        mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src,
-                                                          true);
+        CallRuntimeHelperImmMethodRegLocation(kQuickAllocArrayResolved, direct_type_ptr, rl_src,
+                                              true);
       }
     } else {
       // The slow path.
-      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArray);
-      mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
+      CallRuntimeHelperImmMethodRegLocation(kQuickAllocArray, type_idx, rl_src, true);
     }
-    DCHECK_NE(func_offset.Int32Value(), -1);
   } else {
-    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayWithAccessCheck);
-    mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
+    CallRuntimeHelperImmMethodRegLocation(kQuickAllocArrayWithAccessCheck, type_idx, rl_src, true);
   }
-  RegLocation rl_result = mir_to_lir->GetReturn(kRefReg);
-  mir_to_lir->StoreValue(rl_dest, rl_result);
-}
-
-/*
- * Let helper function take care of everything.  Will call
- * Array::AllocFromCode(type_idx, method, count);
- * Note: AllocFromCode will handle checks for errNegativeArraySize.
- */
-void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
-                          RegLocation rl_src) {
-  if (cu_->target64) {
-    GenNewArrayImpl<8>(this, cu_, type_idx, rl_dest, rl_src);
-  } else {
-    GenNewArrayImpl<4>(this, cu_, type_idx, rl_dest, rl_src);
-  }
-}
-
-template <size_t pointer_size>
-static void GenFilledNewArrayCall(Mir2Lir* mir_to_lir, CompilationUnit* cu, int elems, int type_idx) {
-  ThreadOffset<pointer_size> func_offset(-1);
-  if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx, *cu->dex_file,
-                                                      type_idx)) {
-    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pCheckAndAllocArray);
-  } else {
-    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pCheckAndAllocArrayWithAccessCheck);
-  }
-  mir_to_lir->CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
+  StoreValue(rl_dest, GetReturn(kRefReg));
 }
 
 /*
@@ -440,11 +388,14 @@
   int elems = info->num_arg_words;
   int type_idx = info->index;
   FlushAllRegs();  /* Everything to home location */
-  if (cu_->target64) {
-    GenFilledNewArrayCall<8>(this, cu_, elems, type_idx);
+  QuickEntrypointEnum target;
+  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
+                                                       type_idx)) {
+    target = kQuickCheckAndAllocArray;
   } else {
-    GenFilledNewArrayCall<4>(this, cu_, elems, type_idx);
+    target = kQuickCheckAndAllocArrayWithAccessCheck;
   }
+  CallRuntimeHelperImmMethodImm(target, type_idx, elems, true);
   FreeTemp(TargetReg(kArg2, kNotWide));
   FreeTemp(TargetReg(kArg1, kNotWide));
   /*
@@ -559,13 +510,7 @@
   void Compile() {
     LIR* unresolved_target = GenerateTargetLabel();
     uninit_->target = unresolved_target;
-    if (cu_->target64) {
-      m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeStaticStorage),
-                                 storage_index_, true);
-    } else {
-      m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeStaticStorage),
-                                 storage_index_, true);
-    }
+    m2l_->CallRuntimeHelperImm(kQuickInitializeStaticStorage, storage_index_, true);
     // Copy helper's result into r_base, a no-op on all but MIPS.
     m2l_->OpRegCopy(r_base_,  m2l_->TargetReg(kRet0, kRef));
 
@@ -578,17 +523,6 @@
   const RegStorage r_base_;
 };
 
-template <size_t pointer_size>
-static void GenSputCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
-                        const MirSFieldLoweringInfo* field_info, RegLocation rl_src) {
-  ThreadOffset<pointer_size> setter_offset =
-      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet64Static)
-          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSetObjStatic)
-              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet32Static));
-  mir_to_lir->CallRuntimeHelperImmRegLocation(setter_offset, field_info->FieldIndex(), rl_src,
-                                              true);
-}
-
 void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
                       bool is_object) {
   const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
@@ -671,24 +605,13 @@
     FreeTemp(r_base);
   } else {
     FlushAllRegs();  // Everything to home locations
-    if (cu_->target64) {
-      GenSputCall<8>(this, is_long_or_double, is_object, &field_info, rl_src);
-    } else {
-      GenSputCall<4>(this, is_long_or_double, is_object, &field_info, rl_src);
-    }
+    QuickEntrypointEnum target =
+        is_long_or_double ? kQuickSet64Static
+            : (is_object ? kQuickSetObjStatic : kQuickSet32Static);
+    CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_src, true);
   }
 }
 
-template <size_t pointer_size>
-static void GenSgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
-                        const MirSFieldLoweringInfo* field_info) {
-  ThreadOffset<pointer_size> getter_offset =
-      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Static)
-          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjStatic)
-              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Static));
-  mir_to_lir->CallRuntimeHelperImm(getter_offset, field_info->FieldIndex(), true);
-}
-
 void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
                       bool is_long_or_double, bool is_object) {
   const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
@@ -764,11 +687,11 @@
     }
   } else {
     FlushAllRegs();  // Everything to home locations
-    if (cu_->target64) {
-      GenSgetCall<8>(this, is_long_or_double, is_object, &field_info);
-    } else {
-      GenSgetCall<4>(this, is_long_or_double, is_object, &field_info);
-    }
+    QuickEntrypointEnum target =
+        is_long_or_double ? kQuickGet64Static
+            : (is_object ? kQuickGetObjStatic : kQuickGet32Static);
+    CallRuntimeHelperImm(target, field_info.FieldIndex(), true);
+
     // FIXME: pGetXXStatic always return an int or int64 regardless of rl_dest.fp.
     if (is_long_or_double) {
       RegLocation rl_result = GetReturnWide(kCoreReg);
@@ -791,19 +714,6 @@
   slow_paths_.Reset();
 }
 
-template <size_t pointer_size>
-static void GenIgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
-                        const MirIFieldLoweringInfo* field_info, RegLocation rl_obj) {
-  ThreadOffset<pointer_size> getter_offset =
-      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Instance)
-          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjInstance)
-              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Instance));
-  // Second argument of pGetXXInstance is always a reference.
-  DCHECK_EQ(static_cast<unsigned int>(rl_obj.wide), 0U);
-  mir_to_lir->CallRuntimeHelperImmRegLocation(getter_offset, field_info->FieldIndex(), rl_obj,
-                                              true);
-}
-
 void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
                       RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                       bool is_object) {
@@ -832,11 +742,13 @@
       StoreValue(rl_dest, rl_result);
     }
   } else {
-    if (cu_->target64) {
-      GenIgetCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj);
-    } else {
-      GenIgetCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj);
-    }
+    QuickEntrypointEnum target =
+        is_long_or_double ? kQuickGet64Instance
+            : (is_object ? kQuickGetObjInstance : kQuickGet32Instance);
+    // Second argument of pGetXXInstance is always a reference.
+    DCHECK_EQ(static_cast<unsigned int>(rl_obj.wide), 0U);
+    CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_obj, true);
+
     // FIXME: pGetXXInstance always return an int or int64 regardless of rl_dest.fp.
     if (is_long_or_double) {
       RegLocation rl_result = GetReturnWide(kCoreReg);
@@ -848,18 +760,6 @@
   }
 }
 
-template <size_t pointer_size>
-static void GenIputCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
-                        const MirIFieldLoweringInfo* field_info, RegLocation rl_obj,
-                        RegLocation rl_src) {
-  ThreadOffset<pointer_size> setter_offset =
-      is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet64Instance)
-          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSetObjInstance)
-              : QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet32Instance));
-  mir_to_lir->CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info->FieldIndex(),
-                                                         rl_obj, rl_src, true);
-}
-
 void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                       RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                       bool is_object) {
@@ -890,35 +790,24 @@
       MarkGCCard(rl_src.reg, rl_obj.reg);
     }
   } else {
-    if (cu_->target64) {
-      GenIputCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src);
-    } else {
-      GenIputCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src);
-    }
+    QuickEntrypointEnum target =
+        is_long_or_double ? kQuickSet64Instance
+            : (is_object ? kQuickSetObjInstance : kQuickSet32Instance);
+    CallRuntimeHelperImmRegLocationRegLocation(target, field_info.FieldIndex(), rl_obj, rl_src,
+                                               true);
   }
 }
 
-template <size_t pointer_size>
-static void GenArrayObjPutCall(Mir2Lir* mir_to_lir, bool needs_range_check, bool needs_null_check,
-                               RegLocation rl_array, RegLocation rl_index, RegLocation rl_src) {
-  ThreadOffset<pointer_size> helper = needs_range_check
-        ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObjectWithNullAndBoundCheck)
-                            : QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObjectWithBoundCheck))
-        : QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObject);
-  mir_to_lir->CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index, rl_src,
-                                                                 true);
-}
-
 void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                              RegLocation rl_src) {
   bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
   bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
       (opt_flags & MIR_IGNORE_NULL_CHECK));
-  if (cu_->target64) {
-    GenArrayObjPutCall<8>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src);
-  } else {
-    GenArrayObjPutCall<4>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src);
-  }
+  QuickEntrypointEnum target = needs_range_check
+        ? (needs_null_check ? kQuickAputObjectWithNullAndBoundCheck
+                            : kQuickAputObjectWithBoundCheck)
+        : kQuickAputObject;
+  CallRuntimeHelperRegLocationRegLocationRegLocation(target, rl_array, rl_index, rl_src, true);
 }
 
 void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
@@ -931,13 +820,7 @@
                                                         type_idx)) {
     // Call out to helper which resolves type and verifies access.
     // Resolved type returned in kRet0.
-    if (cu_->target64) {
-      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
-                              type_idx, rl_method.reg, true);
-    } else {
-      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
-                              type_idx, rl_method.reg, true);
-    }
+    CallRuntimeHelperImmReg(kQuickInitializeTypeAndVerifyAccess, type_idx, rl_method.reg, true);
     RegLocation rl_result = GetReturn(kRefReg);
     StoreValue(rl_dest, rl_result);
   } else {
@@ -966,15 +849,8 @@
         void Compile() {
           GenerateTargetLabel();
 
-          if (cu_->target64) {
-            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
-                                          rl_method_.reg, true);
-          } else {
-            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
-                                                      rl_method_.reg, true);
-          }
+          m2l_->CallRuntimeHelperImmReg(kQuickInitializeType, type_idx_, rl_method_.reg, true);
           m2l_->OpRegCopy(rl_result_.reg,  m2l_->TargetReg(kRet0, kRef));
-
           m2l_->OpUnconditionalBranch(cont_);
         }
 
@@ -1035,13 +911,7 @@
 
         void Compile() {
           GenerateTargetLabel();
-          if (cu_->target64) {
-            m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pResolveString),
-                                          r_method_, string_idx_, true);
-          } else {
-            m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pResolveString),
-                                          r_method_, string_idx_, true);
-          }
+          m2l_->CallRuntimeHelperRegImm(kQuickResolveString, r_method_, string_idx_, true);
           m2l_->OpUnconditionalBranch(cont_);
         }
 
@@ -1066,17 +936,17 @@
   }
 }
 
-template <size_t pointer_size>
-static void GenNewInstanceImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, uint32_t type_idx,
-                               RegLocation rl_dest) {
-  mir_to_lir->FlushAllRegs();  /* Everything to home location */
+/*
+ * Let helper function take care of everything.  Will
+ * call Class::NewInstanceFromCode(type_idx, method);
+ */
+void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
+  FlushAllRegs();  /* Everything to home location */
   // alloc will always check for resolution, do we also need to verify
   // access because the verifier was unable to?
-  ThreadOffset<pointer_size> func_offset(-1);
-  const DexFile* dex_file = cu->dex_file;
-  CompilerDriver* driver = cu->compiler_driver;
-  if (driver->CanAccessInstantiableTypeWithoutChecks(
-      cu->method_idx, *dex_file, type_idx)) {
+  const DexFile* dex_file = cu_->dex_file;
+  CompilerDriver* driver = cu_->compiler_driver;
+  if (driver->CanAccessInstantiableTypeWithoutChecks(cu_->method_idx, *dex_file, type_idx)) {
     bool is_type_initialized;
     bool use_direct_type_ptr;
     uintptr_t direct_type_ptr;
@@ -1087,60 +957,33 @@
                                    !is_finalizable) {
       // The fast path.
       if (!use_direct_type_ptr) {
-        mir_to_lir->LoadClassType(type_idx, kArg0);
+        LoadClassType(type_idx, kArg0);
         if (!is_type_initialized) {
-          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved);
-          mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0, kRef),
-                                                 true);
+          CallRuntimeHelperRegMethod(kQuickAllocObjectResolved, TargetReg(kArg0, kRef), true);
         } else {
-          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized);
-          mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0, kRef),
-                                                 true);
+          CallRuntimeHelperRegMethod(kQuickAllocObjectInitialized, TargetReg(kArg0, kRef), true);
         }
       } else {
         // Use the direct pointer.
         if (!is_type_initialized) {
-          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved);
-          mir_to_lir->CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
+          CallRuntimeHelperImmMethod(kQuickAllocObjectResolved, direct_type_ptr, true);
         } else {
-          func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized);
-          mir_to_lir->CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
+          CallRuntimeHelperImmMethod(kQuickAllocObjectInitialized, direct_type_ptr, true);
         }
       }
     } else {
       // The slow path.
-      DCHECK_EQ(func_offset.Int32Value(), -1);
-      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObject);
-      mir_to_lir->CallRuntimeHelperImmMethod(func_offset, type_idx, true);
+      CallRuntimeHelperImmMethod(kQuickAllocObject, type_idx, true);
     }
-    DCHECK_NE(func_offset.Int32Value(), -1);
   } else {
-    func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectWithAccessCheck);
-    mir_to_lir->CallRuntimeHelperImmMethod(func_offset, type_idx, true);
+    CallRuntimeHelperImmMethod(kQuickAllocObjectWithAccessCheck, type_idx, true);
   }
-  RegLocation rl_result = mir_to_lir->GetReturn(kRefReg);
-  mir_to_lir->StoreValue(rl_dest, rl_result);
-}
-
-/*
- * Let helper function take care of everything.  Will
- * call Class::NewInstanceFromCode(type_idx, method);
- */
-void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
-  if (cu_->target64) {
-    GenNewInstanceImpl<8>(this, cu_, type_idx, rl_dest);
-  } else {
-    GenNewInstanceImpl<4>(this, cu_, type_idx, rl_dest);
-  }
+  StoreValue(rl_dest, GetReturn(kRefReg));
 }
 
 void Mir2Lir::GenThrow(RegLocation rl_src) {
   FlushAllRegs();
-  if (cu_->target64) {
-    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pDeliverException), rl_src, true);
-  } else {
-    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true);
-  }
+  CallRuntimeHelperRegLocation(kQuickDeliverException, rl_src, true);
 }
 
 // For final classes there are no sub-classes to check and so we can answer the instance-of
@@ -1214,13 +1057,7 @@
   if (needs_access_check) {
     // Check we have access to type_idx and if not throw IllegalAccessError,
     // returns Class* in kArg0
-    if (cu_->target64) {
-      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
-                           type_idx, true);
-    } else {
-      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
-                           type_idx, true);
-    }
+    CallRuntimeHelperImm(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
     OpRegCopy(class_reg, ret_reg);  // Align usage with fast path
     LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref
   } else if (use_declaring_class) {
@@ -1256,16 +1093,9 @@
         void Compile() OVERRIDE {
           GenerateTargetLabel();
 
-          if (cu_->target64) {
-            m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
-                                       true);
-          } else {
-            m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
-                                       true);
-          }
+          m2l_->CallRuntimeHelperImm(kQuickInitializeType, type_idx_, true);
           m2l_->OpRegCopy(m2l_->TargetReg(kArg2, kRef),
                           m2l_->TargetReg(kRet0, kRef));  // Align usage with fast path
-
           m2l_->OpUnconditionalBranch(cont_);
         }
 
@@ -1299,9 +1129,7 @@
                      kCoreReg);
   } else {
     if (cu_->instruction_set == kThumb2) {
-      RegStorage r_tgt = cu_->target64 ?
-          LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) :
-          LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
+      RegStorage r_tgt = LoadHelper(kQuickInstanceofNonTrivial);
       LIR* it = nullptr;
       if (!type_known_abstract) {
       /* Uses conditional nullification */
@@ -1323,11 +1151,7 @@
       }
 
       OpRegCopy(TargetReg(kArg0, kRef), class_reg);    // .ne case - arg0 <= class
-      if (cu_->target64) {
-        CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial), false);
-      } else {
-        CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial), false);
-      }
+      CallRuntimeHelper(kQuickInstanceofNonTrivial, false);
     }
   }
   // TODO: only clobber when type isn't final?
@@ -1336,7 +1160,7 @@
   LIR* target = NewLIR0(kPseudoTargetLabel);
   StoreValue(rl_dest, rl_result);
   branch1->target = target;
-  if (branchover != NULL) {
+  if (branchover != nullptr) {
     branchover->target = target;
   }
 }
@@ -1386,13 +1210,7 @@
     // Check we have access to type_idx and if not throw IllegalAccessError,
     // returns Class* in kRet0
     // InitializeTypeAndVerifyAccess(idx, method)
-    if (cu_->target64) {
-      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
-                           type_idx, true);
-    } else {
-      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
-                           type_idx, true);
-    }
+    CallRuntimeHelperImm(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
     OpRegCopy(class_reg, TargetReg(kRet0, kRef));  // Align usage with fast path
   } else if (use_declaring_class) {
     LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
@@ -1422,13 +1240,8 @@
 
           // Call out to helper, which will return resolved type in kArg0
           // InitializeTypeFromCode(idx, method)
-          if (m2l_->cu_->target64) {
-            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
-                                          m2l_->TargetReg(kArg1, kRef), true);
-          } else {
-            m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
-                                          m2l_->TargetReg(kArg1, kRef), true);
-          }
+          m2l_->CallRuntimeHelperImmReg(kQuickInitializeType, type_idx_,
+                                        m2l_->TargetReg(kArg1, kRef), true);
           m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0, kRef));  // Align usage with fast path
           m2l_->OpUnconditionalBranch(cont_);
         }
@@ -1459,16 +1272,8 @@
         m2l_->LoadRefDisp(m2l_->TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(),
                           m2l_->TargetReg(kArg1, kRef), kNotVolatile);
       }
-      if (m2l_->cu_->target64) {
-        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast),
-                                      m2l_->TargetReg(kArg2, kRef), m2l_->TargetReg(kArg1, kRef),
-                                      true);
-      } else {
-        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast),
-                                      m2l_->TargetReg(kArg2, kRef), m2l_->TargetReg(kArg1, kRef),
-                                      true);
-      }
-
+      m2l_->CallRuntimeHelperRegReg(kQuickCheckCast, m2l_->TargetReg(kArg2, kRef),
+                                    m2l_->TargetReg(kArg1, kRef), true);
       m2l_->OpUnconditionalBranch(cont_);
     }
 
@@ -1549,39 +1354,28 @@
   }
 }
 
-
-template <size_t pointer_size>
-static void GenShiftOpLongCall(Mir2Lir* mir_to_lir, Instruction::Code opcode, RegLocation rl_src1,
-                               RegLocation rl_shift) {
-  ThreadOffset<pointer_size> func_offset(-1);
-
+void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                             RegLocation rl_src1, RegLocation rl_shift) {
+  QuickEntrypointEnum target;
   switch (opcode) {
     case Instruction::SHL_LONG:
     case Instruction::SHL_LONG_2ADDR:
-      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pShlLong);
+      target = kQuickShlLong;
       break;
     case Instruction::SHR_LONG:
     case Instruction::SHR_LONG_2ADDR:
-      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pShrLong);
+      target = kQuickShrLong;
       break;
     case Instruction::USHR_LONG:
     case Instruction::USHR_LONG_2ADDR:
-      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pUshrLong);
+      target = kQuickUshrLong;
       break;
     default:
       LOG(FATAL) << "Unexpected case";
+      target = kQuickShlLong;
   }
-  mir_to_lir->FlushAllRegs();   /* Send everything to home location */
-  mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
-}
-
-void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                             RegLocation rl_src1, RegLocation rl_shift) {
-  if (cu_->target64) {
-    GenShiftOpLongCall<8>(this, opcode, rl_src1, rl_shift);
-  } else {
-    GenShiftOpLongCall<4>(this, opcode, rl_src1, rl_shift);
-  }
+  FlushAllRegs();   /* Send everything to home location */
+  CallRuntimeHelperRegLocationRegLocation(target, rl_src1, rl_shift, false);
   RegLocation rl_result = GetReturnWide(kCoreReg);
   StoreValueWide(rl_dest, rl_result);
 }
@@ -1710,19 +1504,13 @@
     if (!done) {
       FlushAllRegs();   /* Send everything to home location */
       LoadValueDirectFixed(rl_src2, TargetReg(kArg1, kNotWide));
-      RegStorage r_tgt = cu_->target64 ?
-          CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod)) :
-          CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod));
+      RegStorage r_tgt = CallHelperSetup(kQuickIdivmod);
       LoadValueDirectFixed(rl_src1, TargetReg(kArg0, kNotWide));
       if (check_zero) {
         GenDivZeroCheck(TargetReg(kArg1, kNotWide));
       }
       // NOTE: callout here is not a safepoint.
-      if (cu_->target64) {
-        CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), false /* not a safepoint */);
-      } else {
-        CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), false /* not a safepoint */);
-      }
+      CallHelper(r_tgt, kQuickIdivmod, false /* not a safepoint */);
       if (op == kOpDiv)
         rl_result = GetReturn(kCoreReg);
       else
@@ -1981,13 +1769,7 @@
         FlushAllRegs();   /* Everything to home location. */
         LoadValueDirectFixed(rl_src, TargetReg(kArg0, kNotWide));
         Clobber(TargetReg(kArg0, kNotWide));
-        if (cu_->target64) {
-          CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), TargetReg(kArg0, kNotWide),
-                                  lit, false);
-        } else {
-          CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), TargetReg(kArg0, kNotWide),
-                                  lit, false);
-        }
+        CallRuntimeHelperRegImm(kQuickIdivmod, TargetReg(kArg0, kNotWide), lit, false);
         if (is_div)
           rl_result = GetReturn(kCoreReg);
         else
@@ -2010,42 +1792,41 @@
   StoreValue(rl_dest, rl_result);
 }
 
-template <size_t pointer_size>
-static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruction::Code opcode,
-                               RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                             RegLocation rl_src1, RegLocation rl_src2) {
   RegLocation rl_result;
   OpKind first_op = kOpBkpt;
   OpKind second_op = kOpBkpt;
   bool call_out = false;
   bool check_zero = false;
-  ThreadOffset<pointer_size> func_offset(-1);
-  int ret_reg = mir_to_lir->TargetReg(kRet0, kNotWide).GetReg();
+  int ret_reg = TargetReg(kRet0, kNotWide).GetReg();
+  QuickEntrypointEnum target;
 
   switch (opcode) {
     case Instruction::NOT_LONG:
-      if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
-        mir_to_lir->GenNotLong(rl_dest, rl_src2);
+      if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
+        GenNotLong(rl_dest, rl_src2);
         return;
       }
-      rl_src2 = mir_to_lir->LoadValueWide(rl_src2, kCoreReg);
-      rl_result = mir_to_lir->EvalLoc(rl_dest, kCoreReg, true);
+      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
       // Check for destructive overlap
       if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
-        RegStorage t_reg = mir_to_lir->AllocTemp();
-        mir_to_lir->OpRegCopy(t_reg, rl_src2.reg.GetHigh());
-        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
-        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
-        mir_to_lir->FreeTemp(t_reg);
+        RegStorage t_reg = AllocTemp();
+        OpRegCopy(t_reg, rl_src2.reg.GetHigh());
+        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
+        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
+        FreeTemp(t_reg);
       } else {
-        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
-        mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
+        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
+        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
       }
-      mir_to_lir->StoreValueWide(rl_dest, rl_result);
+      StoreValueWide(rl_dest, rl_result);
       return;
     case Instruction::ADD_LONG:
     case Instruction::ADD_LONG_2ADDR:
-      if (cu->instruction_set != kThumb2) {
-        mir_to_lir->GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
+      if (cu_->instruction_set != kThumb2) {
+        GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
         return;
       }
       first_op = kOpAdd;
@@ -2053,8 +1834,8 @@
       break;
     case Instruction::SUB_LONG:
     case Instruction::SUB_LONG_2ADDR:
-      if (cu->instruction_set != kThumb2) {
-        mir_to_lir->GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
+      if (cu_->instruction_set != kThumb2) {
+        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
         return;
       }
       first_op = kOpSub;
@@ -2062,53 +1843,53 @@
       break;
     case Instruction::MUL_LONG:
     case Instruction::MUL_LONG_2ADDR:
-      if (cu->instruction_set != kMips) {
-        mir_to_lir->GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
+      if (cu_->instruction_set != kMips) {
+        GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
         return;
       } else {
         call_out = true;
-        ret_reg = mir_to_lir->TargetReg(kRet0, kNotWide).GetReg();
-        func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmul);
+        TargetReg(kRet0, kNotWide).GetReg();
+        target = kQuickLmul;
       }
       break;
     case Instruction::DIV_LONG:
     case Instruction::DIV_LONG_2ADDR:
-      if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
-        mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
+      if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
+        GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
         return;
       }
       call_out = true;
       check_zero = true;
-      ret_reg = mir_to_lir->TargetReg(kRet0, kNotWide).GetReg();
-      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLdiv);
+      ret_reg = TargetReg(kRet0, kNotWide).GetReg();
+      target = kQuickLdiv;
       break;
     case Instruction::REM_LONG:
     case Instruction::REM_LONG_2ADDR:
-      if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
-        mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
+      if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
+        GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
         return;
       }
       call_out = true;
       check_zero = true;
-      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmod);
+      target = kQuickLmod;
       /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
-      ret_reg = (cu->instruction_set == kThumb2) ? mir_to_lir->TargetReg(kArg2, kNotWide).GetReg() :
-          mir_to_lir->TargetReg(kRet0, kNotWide).GetReg();
+      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2, kNotWide).GetReg() :
+          TargetReg(kRet0, kNotWide).GetReg();
       break;
     case Instruction::AND_LONG_2ADDR:
     case Instruction::AND_LONG:
-      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
-          cu->instruction_set == kArm64) {
-        return mir_to_lir->GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
+      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64 ||
+          cu_->instruction_set == kArm64) {
+        return GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
       }
       first_op = kOpAnd;
       second_op = kOpAnd;
       break;
     case Instruction::OR_LONG:
     case Instruction::OR_LONG_2ADDR:
-      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
-          cu->instruction_set == kArm64) {
-        mir_to_lir->GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
+      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64 ||
+          cu_->instruction_set == kArm64) {
+        GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
         return;
       }
       first_op = kOpOr;
@@ -2116,52 +1897,43 @@
       break;
     case Instruction::XOR_LONG:
     case Instruction::XOR_LONG_2ADDR:
-      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
-          cu->instruction_set == kArm64) {
-        mir_to_lir->GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
+      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64 ||
+          cu_->instruction_set == kArm64) {
+        GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
         return;
       }
       first_op = kOpXor;
       second_op = kOpXor;
       break;
     case Instruction::NEG_LONG: {
-      mir_to_lir->GenNegLong(rl_dest, rl_src2);
+      GenNegLong(rl_dest, rl_src2);
       return;
     }
     default:
       LOG(FATAL) << "Invalid long arith op";
   }
   if (!call_out) {
-    mir_to_lir->GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
+    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
   } else {
-    mir_to_lir->FlushAllRegs();   /* Send everything to home location */
+    FlushAllRegs();   /* Send everything to home location */
     if (check_zero) {
-      RegStorage r_tmp1 = mir_to_lir->TargetReg(kArg0, kWide);
-      RegStorage r_tmp2 = mir_to_lir->TargetReg(kArg2, kWide);
-      mir_to_lir->LoadValueDirectWideFixed(rl_src2, r_tmp2);
-      RegStorage r_tgt = mir_to_lir->CallHelperSetup(func_offset);
-      mir_to_lir->GenDivZeroCheckWide(r_tmp2);
-      mir_to_lir->LoadValueDirectWideFixed(rl_src1, r_tmp1);
+      RegStorage r_tmp1 = TargetReg(kArg0, kWide);
+      RegStorage r_tmp2 = TargetReg(kArg2, kWide);
+      LoadValueDirectWideFixed(rl_src2, r_tmp2);
+      RegStorage r_tgt = CallHelperSetup(target);
+      GenDivZeroCheckWide(r_tmp2);
+      LoadValueDirectWideFixed(rl_src1, r_tmp1);
       // NOTE: callout here is not a safepoint
-      mir_to_lir->CallHelper(r_tgt, func_offset, false /* not safepoint */);
+      CallHelper(r_tgt, target, false /* not safepoint */);
     } else {
-      mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
+      CallRuntimeHelperRegLocationRegLocation(target, rl_src1, rl_src2, false);
     }
     // Adjust return regs in to handle case of rem returning kArg2/kArg3
-    if (ret_reg == mir_to_lir->TargetReg(kRet0, kNotWide).GetReg())
-      rl_result = mir_to_lir->GetReturnWide(kCoreReg);
+    if (ret_reg == TargetReg(kRet0, kNotWide).GetReg())
+      rl_result = GetReturnWide(kCoreReg);
     else
-      rl_result = mir_to_lir->GetReturnWideAlt();
-    mir_to_lir->StoreValueWide(rl_dest, rl_result);
-  }
-}
-
-void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                             RegLocation rl_src1, RegLocation rl_src2) {
-  if (cu_->target64) {
-    GenArithOpLongImpl<8>(this, cu_, opcode, rl_dest, rl_src1, rl_src2);
-  } else {
-    GenArithOpLongImpl<4>(this, cu_, opcode, rl_dest, rl_src1, rl_src2);
+      rl_result = GetReturnWideAlt();
+    StoreValueWide(rl_dest, rl_result);
   }
 }
 
@@ -2174,17 +1946,15 @@
   }
 }
 
-template <size_t pointer_size>
-void Mir2Lir::GenConversionCall(ThreadOffset<pointer_size> func_offset,
-                                RegLocation rl_dest, RegLocation rl_src) {
+void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
+                                RegLocation rl_src) {
   /*
    * Don't optimize the register usage since it calls out to support
    * functions
    */
-  DCHECK_EQ(pointer_size, GetInstructionSetPointerSize(cu_->instruction_set));
 
   FlushAllRegs();   /* Send everything to home location */
-  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
+  CallRuntimeHelperRegLocation(trampoline, rl_src, false);
   if (rl_dest.wide) {
     RegLocation rl_result;
     rl_result = GetReturnWide(LocToRegClass(rl_dest));
@@ -2195,10 +1965,6 @@
     StoreValue(rl_dest, rl_result);
   }
 }
-template void Mir2Lir::GenConversionCall(ThreadOffset<4> func_offset,
-                                         RegLocation rl_dest, RegLocation rl_src);
-template void Mir2Lir::GenConversionCall(ThreadOffset<8> func_offset,
-                                         RegLocation rl_dest, RegLocation rl_src);
 
 class SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
  public:
@@ -2210,11 +1976,7 @@
     m2l_->ResetRegPool();
     m2l_->ResetDefTracking();
     GenerateTargetLabel(kPseudoSuspendTarget);
-    if (cu_->target64) {
-      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pTestSuspend), true);
-    } else {
-      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pTestSuspend), true);
-    }
+    m2l_->CallRuntimeHelper(kQuickTestSuspend, true);
     if (cont_ != nullptr) {
       m2l_->OpUnconditionalBranch(cont_);
     }
@@ -2269,21 +2031,13 @@
 /* Call out to helper assembly routine that will null check obj and then lock it. */
 void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
   FlushAllRegs();
-  if (cu_->target64) {
-    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pLockObject), rl_src, true);
-  } else {
-    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pLockObject), rl_src, true);
-  }
+  CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true);
 }
 
 /* Call out to helper assembly routine that will null check obj and then unlock it. */
 void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
   FlushAllRegs();
-  if (cu_->target64) {
-    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject), rl_src, true);
-  } else {
-    CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject), rl_src, true);
-  }
+  CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true);
 }
 
 /* Generic code for generating a wide constant into a VR. */
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 2c69593..ea52b39 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -67,13 +67,6 @@
   AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume));
 }
 
-// Macro to help instantiate.
-// TODO: This might be used to only instantiate <4> on pure 32b systems.
-#define INSTANTIATE(sig_part1, ...) \
-  template sig_part1(ThreadOffset<4>, __VA_ARGS__); \
-  template sig_part1(ThreadOffset<8>, __VA_ARGS__); \
-
-
 /*
  * To save scheduling time, helper calls are broken into two parts: generation of
  * the helper target address, and the actual call to the helper.  Because x86
@@ -81,107 +74,72 @@
  * load arguments between the two parts.
 */
-// template <size_t pointer_size>
-RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<4> helper_offset) {
-  // All CallRuntimeHelperXXX call this first. So make a central check here.
-  DCHECK_EQ(4U, GetInstructionSetPointerSize(cu_->instruction_set));
-
+RegStorage Mir2Lir::CallHelperSetup(QuickEntrypointEnum trampoline) {
   if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
     return RegStorage::InvalidReg();
   } else {
-    return LoadHelper(helper_offset);
+    return LoadHelper(trampoline);
   }
 }
 
-RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<8> helper_offset) {
-  // All CallRuntimeHelperXXX call this first. So make a central check here.
-  DCHECK_EQ(8U, GetInstructionSetPointerSize(cu_->instruction_set));
+LIR* Mir2Lir::CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
+                         bool use_link) {
+  LIR* call_inst = InvokeTrampoline(use_link ? kOpBlx : kOpBx, r_tgt, trampoline);
 
-  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
-    return RegStorage::InvalidReg();
-  } else {
-    return LoadHelper(helper_offset);
-  }
-}
-
-/* NOTE: if r_tgt is a temp, it will be freed following use */
-template <size_t pointer_size>
-LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<pointer_size> helper_offset,
-                         bool safepoint_pc, bool use_link) {
-  LIR* call_inst;
-  OpKind op = use_link ? kOpBlx : kOpBx;
-  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
-    call_inst = OpThreadMem(op, helper_offset);
-  } else {
-    call_inst = OpReg(op, r_tgt);
+  if (r_tgt.Valid()) {
     FreeTemp(r_tgt);
   }
+
   if (safepoint_pc) {
     MarkSafepointPC(call_inst);
   }
   return call_inst;
 }
-template LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<4> helper_offset,
-                                        bool safepoint_pc, bool use_link);
-template LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<8> helper_offset,
-                                        bool safepoint_pc, bool use_link);
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelper(ThreadOffset<pointer_size> helper_offset, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc) {
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelper, bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0,
-                                   bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc) {
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImm, int arg0, bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
+void Mir2Lir::CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0,
                                    bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperReg, RegStorage arg0, bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_offset,
-                                           RegLocation arg0, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
+                                           bool safepoint_pc) {
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   if (arg0.wide == 0) {
     LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0));
   } else {
     LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
   }
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocation, RegLocation arg0, bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
                                       bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   LoadConstant(TargetReg(kArg1, kNotWide), arg1);
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmImm, int arg0, int arg1, bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
+void Mir2Lir::CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0,
                                               RegLocation arg1, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   DCHECK(!arg1.fp);
   if (arg1.wide == 0) {
     LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
@@ -191,61 +150,49 @@
   }
   LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmRegLocation, int arg0, RegLocation arg1,
-            bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset<pointer_size> helper_offset,
-                                              RegLocation arg0, int arg1, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0,
+                                              int arg1, bool safepoint_pc) {
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   DCHECK(!arg0.wide);
   LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
   LoadConstant(TargetReg(kArg1, kNotWide), arg1);
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationImm, RegLocation arg0, int arg1,
-            bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, int arg0,
-                                      RegStorage arg1, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
+                                      bool safepoint_pc) {
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   OpRegCopy(TargetReg(kArg1, arg1.GetWideKind()), arg1);
   LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmReg, int arg0, RegStorage arg1, bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
-                                      int arg1, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
+                                      bool safepoint_pc) {
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
   LoadConstant(TargetReg(kArg1, kNotWide), arg1);
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegImm, RegStorage arg0, int arg1, bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset<pointer_size> helper_offset, int arg0,
+void Mir2Lir::CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0,
                                          bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   LoadCurrMethodDirect(TargetReg(kArg1, kRef));
   LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethod, int arg0, bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
+void Mir2Lir::CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
                                          bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
   RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
   if (r_tmp.NotExactlyEquals(arg0)) {
@@ -253,15 +200,12 @@
   }
   LoadCurrMethodDirect(TargetReg(kArg1, kRef));
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegMethod, RegStorage arg0, bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
-                                                    RegStorage arg0, RegLocation arg2,
-                                                    bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(QuickEntrypointEnum trampoline, RegStorage arg0,
+                                                    RegLocation arg2, bool safepoint_pc) {
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
   RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
   if (r_tmp.NotExactlyEquals(arg0)) {
@@ -270,16 +214,13 @@
   LoadCurrMethodDirect(TargetReg(kArg1, kRef));
   LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegMethodRegLocation, RegStorage arg0, RegLocation arg2,
-            bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
+void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline,
                                                       RegLocation arg0, RegLocation arg1,
                                                       bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
     RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);
 
@@ -328,10 +269,8 @@
     }
   }
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocation, RegLocation arg0,
-            RegLocation arg1, bool safepoint_pc)
 
 void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
   WideKind arg0_kind = arg0.GetWideKind();
@@ -352,59 +291,47 @@
   }
 }
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
+void Mir2Lir::CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0,
                                       RegStorage arg1, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   CopyToArgumentRegs(arg0, arg1);
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegReg, RegStorage arg0, RegStorage arg1,
-            bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
+void Mir2Lir::CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
                                          RegStorage arg1, int arg2, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   CopyToArgumentRegs(arg0, arg1);
   LoadConstant(TargetReg(kArg2, kNotWide), arg2);
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegRegImm, RegStorage arg0, RegStorage arg1, int arg2,
-            bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
-                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(QuickEntrypointEnum trampoline, int arg0,
+                                                    RegLocation arg2, bool safepoint_pc) {
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
   LoadCurrMethodDirect(TargetReg(kArg1, kRef));
   LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethodRegLocation, int arg0, RegLocation arg2,
-            bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset<pointer_size> helper_offset, int arg0,
-                                            int arg2, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperImmMethodImm(QuickEntrypointEnum trampoline, int arg0, int arg2,
+                                            bool safepoint_pc) {
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   LoadCurrMethodDirect(TargetReg(kArg1, kRef));
   LoadConstant(TargetReg(kArg2, kNotWide), arg2);
   LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethodImm, int arg0, int arg2, bool safepoint_pc)
 
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
-                                                         int arg0, RegLocation arg1,
+void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
+                                                         RegLocation arg1,
                                                          RegLocation arg2, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U);  // The static_cast works around an
                                                         // instantiation bug in GCC.
   LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
@@ -415,27 +342,22 @@
   }
   LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation, int arg0, RegLocation arg1,
-            RegLocation arg2, bool safepoint_pc)
 
-template <size_t pointer_size>
 void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(
-    ThreadOffset<pointer_size> helper_offset,
+    QuickEntrypointEnum trampoline,
     RegLocation arg0,
     RegLocation arg1,
     RegLocation arg2,
     bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(helper_offset);
+  RegStorage r_tgt = CallHelperSetup(trampoline);
   LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
   LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
   LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
   ClobberCallerSave();
-  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+  CallHelper(r_tgt, trampoline, safepoint_pc);
 }
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation, RegLocation arg0,
-            RegLocation arg1, RegLocation arg2, bool safepoint_pc)
 
 /*
  * If there are any ins passed in registers that have not been promoted
@@ -727,11 +649,11 @@
   return state + 1;
 }
 
-template <size_t pointer_size>
 static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
-                            ThreadOffset<pointer_size> trampoline, int state,
+                            QuickEntrypointEnum trampoline, int state,
                             const MethodReference& target_method, uint32_t method_idx) {
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
+
   /*
    * This handles the case in which the base method is not fully
    * resolved at compile time, we bail to a runtime helper.
@@ -739,8 +662,13 @@
   if (state == 0) {
     if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
       // Load trampoline target
-      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), trampoline.Int32Value(),
-                       cg->TargetPtrReg(kInvokeTgt));
+      int32_t disp;
+      if (cu->target64) {
+        disp = GetThreadOffset<8>(trampoline).Int32Value();
+      } else {
+        disp = GetThreadOffset<4>(trampoline).Int32Value();
+      }
+      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), disp, cg->TargetPtrReg(kInvokeTgt));
     }
     // Load kArg0 with method index
     CHECK_EQ(cu->dex_file, target_method.dex_file);
@@ -755,54 +683,32 @@
                                 const MethodReference& target_method,
                                 uint32_t unused, uintptr_t unused2,
                                 uintptr_t unused3, InvokeType unused4) {
-  if (cu->target64) {
-    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeStaticTrampolineWithAccessCheck);
-    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
-  } else {
-    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeStaticTrampolineWithAccessCheck);
-    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
-  }
+  return NextInvokeInsnSP(cu, info, kQuickInvokeStaticTrampolineWithAccessCheck, state,
+                          target_method, 0);
 }
 
 static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                                 const MethodReference& target_method,
                                 uint32_t unused, uintptr_t unused2,
                                 uintptr_t unused3, InvokeType unused4) {
-  if (cu->target64) {
-    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeDirectTrampolineWithAccessCheck);
-    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
-  } else {
-    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeDirectTrampolineWithAccessCheck);
-    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
-  }
+  return NextInvokeInsnSP(cu, info, kQuickInvokeDirectTrampolineWithAccessCheck, state,
+                          target_method, 0);
 }
 
 static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                                const MethodReference& target_method,
                                uint32_t unused, uintptr_t unused2,
                                uintptr_t unused3, InvokeType unused4) {
-  if (cu->target64) {
-    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeSuperTrampolineWithAccessCheck);
-    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
-  } else {
-    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeSuperTrampolineWithAccessCheck);
-    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
-  }
+  return NextInvokeInsnSP(cu, info, kQuickInvokeSuperTrampolineWithAccessCheck, state,
+                          target_method, 0);
 }
 
 static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                            const MethodReference& target_method,
                            uint32_t unused, uintptr_t unused2,
                            uintptr_t unused3, InvokeType unused4) {
-  if (cu->target64) {
-    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8,
-        pInvokeVirtualTrampolineWithAccessCheck);
-    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
-  } else {
-    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4,
-        pInvokeVirtualTrampolineWithAccessCheck);
-    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
-  }
+  return NextInvokeInsnSP(cu, info, kQuickInvokeVirtualTrampolineWithAccessCheck, state,
+                          target_method, 0);
 }
 
 static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
@@ -810,15 +716,8 @@
                                                 const MethodReference& target_method,
                                                 uint32_t unused, uintptr_t unused2,
                                                 uintptr_t unused3, InvokeType unused4) {
-  if (cu->target64) {
-      ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8,
-          pInvokeInterfaceTrampolineWithAccessCheck);
-      return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
-    } else {
-      ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4,
-          pInvokeInterfaceTrampolineWithAccessCheck);
-      return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
-    }
+  return NextInvokeInsnSP(cu, info, kQuickInvokeInterfaceTrampolineWithAccessCheck, state,
+                          target_method, 0);
 }
 
 int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
@@ -1184,13 +1083,8 @@
     // Generate memcpy
     OpRegRegImm(kOpAdd, TargetReg(kArg0, kRef), TargetPtrReg(kSp), outs_offset);
     OpRegRegImm(kOpAdd, TargetReg(kArg1, kRef), TargetPtrReg(kSp), start_offset);
-    if (cu_->target64) {
-      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(8, pMemcpy), TargetReg(kArg0, kRef),
-                                 TargetReg(kArg1, kRef), (info->num_arg_words - 3) * 4, false);
-    } else {
-      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(4, pMemcpy), TargetReg(kArg0, kRef),
-                                 TargetReg(kArg1, kRef), (info->num_arg_words - 3) * 4, false);
-    }
+    CallRuntimeHelperRegRegImm(kQuickMemcpy, TargetReg(kArg0, kRef), TargetReg(kArg1, kRef),
+                               (info->num_arg_words - 3) * 4, false);
   }
 
   call_state = LoadArgRegs(info, call_state, next_call_insn,
@@ -1345,55 +1239,30 @@
   RegLocation rl_obj = info->args[0];
   RegLocation rl_idx = info->args[1];
   rl_obj = LoadValue(rl_obj, kRefReg);
-  // X86 wants to avoid putting a constant index into a register.
-  if (!((cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64)&& rl_idx.is_const)) {
-    rl_idx = LoadValue(rl_idx, kCoreReg);
-  }
+  rl_idx = LoadValue(rl_idx, kCoreReg);
   RegStorage reg_max;
   GenNullCheck(rl_obj.reg, info->opt_flags);
   bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
   LIR* range_check_branch = nullptr;
   RegStorage reg_off;
   RegStorage reg_ptr;
-  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
-    reg_off = AllocTemp();
-    reg_ptr = AllocTempRef();
-    if (range_check) {
-      reg_max = AllocTemp();
-      Load32Disp(rl_obj.reg, count_offset, reg_max);
-      MarkPossibleNullPointerException(info->opt_flags);
-    }
-    Load32Disp(rl_obj.reg, offset_offset, reg_off);
+  reg_off = AllocTemp();
+  reg_ptr = AllocTempRef();
+  if (range_check) {
+    reg_max = AllocTemp();
+    Load32Disp(rl_obj.reg, count_offset, reg_max);
     MarkPossibleNullPointerException(info->opt_flags);
-    LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
-    if (range_check) {
-      // Set up a slow path to allow retry in case of bounds violation */
-      OpRegReg(kOpCmp, rl_idx.reg, reg_max);
-      FreeTemp(reg_max);
-      range_check_branch = OpCondBranch(kCondUge, nullptr);
-    }
-    OpRegImm(kOpAdd, reg_ptr, data_offset);
-  } else {
-    if (range_check) {
-      // On x86, we can compare to memory directly
-      // Set up a launch pad to allow retry in case of bounds violation */
-      if (rl_idx.is_const) {
-        LIR* comparison;
-        range_check_branch = OpCmpMemImmBranch(
-            kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
-            mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr, &comparison);
-        MarkPossibleNullPointerExceptionAfter(0, comparison);
-     } else {
-        OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
-        MarkPossibleNullPointerException(0);
-        range_check_branch = OpCondBranch(kCondUge, nullptr);
-      }
-    }
-    reg_off = AllocTemp();
-    reg_ptr = AllocTempRef();
-    Load32Disp(rl_obj.reg, offset_offset, reg_off);
-    LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
   }
+  Load32Disp(rl_obj.reg, offset_offset, reg_off);
+  MarkPossibleNullPointerException(info->opt_flags);
+  LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
+  if (range_check) {
+    // Set up a slow path to allow retry in case of bounds violation.
+    OpRegReg(kOpCmp, rl_idx.reg, reg_max);
+    FreeTemp(reg_max);
+    range_check_branch = OpCondBranch(kCondUge, nullptr);
+  }
+  OpRegImm(kOpAdd, reg_ptr, data_offset);
   if (rl_idx.is_const) {
     OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
   } else {
@@ -1405,11 +1274,7 @@
   }
   RegLocation rl_dest = InlineTarget(info);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
-    LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
-  } else {
-    LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf);
-  }
+  LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
   FreeTemp(reg_off);
   FreeTemp(reg_ptr);
   StoreValue(rl_dest, rl_result);
@@ -1624,9 +1489,7 @@
     RegLocation rl_start = info->args[2];     // 3rd arg only present in III flavor of IndexOf.
     LoadValueDirectFixed(rl_start, reg_start);
   }
-  RegStorage r_tgt = cu_->target64 ?
-      LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pIndexOf)) :
-      LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pIndexOf));
+  RegStorage r_tgt = LoadHelper(kQuickIndexOf);
   GenExplicitNullCheck(reg_ptr, info->opt_flags);
   LIR* high_code_point_branch =
       rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
@@ -1665,11 +1528,7 @@
   LoadValueDirectFixed(rl_cmp, reg_cmp);
   RegStorage r_tgt;
   if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
-    if (cu_->target64) {
-      r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pStringCompareTo));
-    } else {
-      r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
-    }
+    r_tgt = LoadHelper(kQuickStringCompareTo);
   } else {
     r_tgt = RegStorage::InvalidReg();
   }
@@ -1679,15 +1538,7 @@
   LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
   AddIntrinsicSlowPath(info, cmp_null_check_branch);
   // NOTE: not a safepoint
-  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
-    OpReg(kOpBlx, r_tgt);
-  } else {
-    if (cu_->target64) {
-      OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pStringCompareTo));
-    } else {
-      OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
-    }
-  }
+  CallHelper(r_tgt, kQuickStringCompareTo, false, true);
   RegLocation rl_return = GetReturn(kCoreReg);
   RegLocation rl_dest = InlineTarget(info);
   StoreValue(rl_dest, rl_return);
@@ -1850,29 +1701,29 @@
   GenInvokeNoInline(info);
 }
 
-template <size_t pointer_size>
 static LIR* GenInvokeNoInlineCall(Mir2Lir* mir_to_lir, InvokeType type) {
-  ThreadOffset<pointer_size> trampoline(-1);
+  QuickEntrypointEnum trampoline;
   switch (type) {
     case kInterface:
-      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeInterfaceTrampolineWithAccessCheck);
+      trampoline = kQuickInvokeInterfaceTrampolineWithAccessCheck;
       break;
     case kDirect:
-      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeDirectTrampolineWithAccessCheck);
+      trampoline = kQuickInvokeDirectTrampolineWithAccessCheck;
       break;
     case kStatic:
-      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeStaticTrampolineWithAccessCheck);
+      trampoline = kQuickInvokeStaticTrampolineWithAccessCheck;
       break;
     case kSuper:
-      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeSuperTrampolineWithAccessCheck);
+      trampoline = kQuickInvokeSuperTrampolineWithAccessCheck;
       break;
     case kVirtual:
-      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeVirtualTrampolineWithAccessCheck);
+      trampoline = kQuickInvokeVirtualTrampolineWithAccessCheck;
       break;
     default:
       LOG(FATAL) << "Unexpected invoke type";
+      trampoline = kQuickInvokeInterfaceTrampolineWithAccessCheck;
   }
-  return mir_to_lir->OpThreadMem(kOpBlx, trampoline);
+  return mir_to_lir->InvokeTrampoline(kOpBlx, RegStorage::InvalidReg(), trampoline);
 }
 
 void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
@@ -1943,12 +1794,7 @@
                           mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
       }
     } else {
-      // TODO: Extract?
-      if (cu_->target64) {
-        call_inst = GenInvokeNoInlineCall<8>(this, info->type);
-      } else {
-        call_inst = GenInvokeNoInlineCall<4>(this, info->type);
-      }
+      call_inst = GenInvokeNoInlineCall(this, info->type);
     }
   }
   EndInvoke(info);
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 26ea6a8..9adddf0 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -245,7 +245,7 @@
   GenBarrier();
   NewLIR0(kMipsCurrPC);  // Really a jal to .+8
   // Now, fill the branch delay slot with the helper load
-  RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData));
+  RegStorage r_tgt = LoadHelper(kQuickHandleFillArrayData);
   GenBarrier();  // Scheduling barrier
 
   // Construct BaseLabel and set up table base register
@@ -332,9 +332,9 @@
         m2l_->Load32Disp(rs_rMIPS_SP, 0, rs_rRA);
         m2l_->OpRegImm(kOpAdd, rs_rMIPS_SP, sp_displace_);
         m2l_->ClobberCallerSave();
-        ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow);
-        RegStorage r_tgt = m2l_->CallHelperSetup(func_offset);  // Doesn't clobber LR.
-        m2l_->CallHelper(r_tgt, func_offset, false /* MarkSafepointPC */, false /* UseLink */);
+        RegStorage r_tgt = m2l_->CallHelperSetup(kQuickThrowStackOverflow);  // Doesn't clobber LR.
+        m2l_->CallHelper(r_tgt, kQuickThrowStackOverflow, false /* MarkSafepointPC */,
+                         false /* UseLink */);
       }
 
      private:
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index bb18ad2..4bd2748 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -31,22 +31,17 @@
                             RegLocation rl_dest, int lit);
     bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
     LIR* CheckSuspendUsingLoad() OVERRIDE;
-    RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
-    RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
+    RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
     LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                       OpSize size, VolatileKind is_volatile) OVERRIDE;
     LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
                          OpSize size) OVERRIDE;
-    LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                             RegStorage r_dest, OpSize size) OVERRIDE;
     LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
     LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
     LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                        OpSize size, VolatileKind is_volatile) OVERRIDE;
     LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
                           OpSize size) OVERRIDE;
-    LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                              RegStorage r_src, OpSize size) OVERRIDE;
     LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
     LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
     void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
@@ -166,7 +161,6 @@
     void OpRegCopy(RegStorage r_dest, RegStorage r_src);
     LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
     LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
-    LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
     LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
     LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
     LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
@@ -174,14 +168,9 @@
     LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
     LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
     LIR* OpTestSuspend(LIR* target);
-    LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) OVERRIDE;
-    LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) OVERRIDE;
     LIR* OpVldm(RegStorage r_base, int count);
     LIR* OpVstm(RegStorage r_base, int count);
-    void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
     void OpRegCopyWide(RegStorage dest, RegStorage src);
-    void OpTlsCmp(ThreadOffset<4> offset, int val) OVERRIDE;
-    void OpTlsCmp(ThreadOffset<8> offset, int val) OVERRIDE;
 
     // TODO: collapse r_dest.
     LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
@@ -204,6 +193,8 @@
       return false;  // Wide FPRs are formed by pairing.
     }
 
+    LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+
   private:
     void ConvertShortToLongBranch(LIR* lir);
     RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 7087be9..3a4128a 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -50,8 +50,7 @@
     case Instruction::REM_FLOAT_2ADDR:
     case Instruction::REM_FLOAT:
       FlushAllRegs();   // Send everything to home location
-      CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pFmodf), rl_src1, rl_src2,
-                                              false);
+      CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
       rl_result = GetReturn(kFPReg);
       StoreValue(rl_dest, rl_result);
       return;
@@ -93,8 +92,7 @@
     case Instruction::REM_DOUBLE_2ADDR:
     case Instruction::REM_DOUBLE:
       FlushAllRegs();   // Send everything to home location
-      CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pFmod), rl_src1, rl_src2,
-                                              false);
+      CallRuntimeHelperRegLocationRegLocation(kQuickFmod, rl_src1, rl_src2, false);
       rl_result = GetReturnWide(kFPReg);
       StoreValueWide(rl_dest, rl_result);
       return;
@@ -133,22 +131,22 @@
       op = kMipsFcvtdw;
       break;
     case Instruction::FLOAT_TO_INT:
-      GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pF2iz), rl_dest, rl_src);
+      GenConversionCall(kQuickF2iz, rl_dest, rl_src);
       return;
     case Instruction::DOUBLE_TO_INT:
-      GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pD2iz), rl_dest, rl_src);
+      GenConversionCall(kQuickD2iz, rl_dest, rl_src);
       return;
     case Instruction::LONG_TO_DOUBLE:
-      GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pL2d), rl_dest, rl_src);
+      GenConversionCall(kQuickL2d, rl_dest, rl_src);
       return;
     case Instruction::FLOAT_TO_LONG:
-      GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pF2l), rl_dest, rl_src);
+      GenConversionCall(kQuickF2l, rl_dest, rl_src);
       return;
     case Instruction::LONG_TO_FLOAT:
-      GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pL2f), rl_dest, rl_src);
+      GenConversionCall(kQuickL2f, rl_dest, rl_src);
       return;
     case Instruction::DOUBLE_TO_LONG:
-      GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pD2l), rl_dest, rl_src);
+      GenConversionCall(kQuickD2l, rl_dest, rl_src);
       return;
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
@@ -170,25 +168,26 @@
 void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
   bool wide = true;
-  ThreadOffset<4> offset(-1);
+  QuickEntrypointEnum target;
 
   switch (opcode) {
     case Instruction::CMPL_FLOAT:
-      offset = QUICK_ENTRYPOINT_OFFSET(4, pCmplFloat);
+      target = kQuickCmplFloat;
       wide = false;
       break;
     case Instruction::CMPG_FLOAT:
-      offset = QUICK_ENTRYPOINT_OFFSET(4, pCmpgFloat);
+      target = kQuickCmpgFloat;
       wide = false;
       break;
     case Instruction::CMPL_DOUBLE:
-      offset = QUICK_ENTRYPOINT_OFFSET(4, pCmplDouble);
+      target = kQuickCmplDouble;
       break;
     case Instruction::CMPG_DOUBLE:
-      offset = QUICK_ENTRYPOINT_OFFSET(4, pCmpgDouble);
+      target = kQuickCmpgDouble;
       break;
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
+      target = kQuickCmplFloat;
   }
   FlushAllRegs();
   LockCallTemps();
@@ -201,7 +200,7 @@
     LoadValueDirectFixed(rl_src1, rs_rMIPS_FARG0);
     LoadValueDirectFixed(rl_src2, rs_rMIPS_FARG2);
   }
-  RegStorage r_tgt = LoadHelper(offset);
+  RegStorage r_tgt = LoadHelper(target);
   // NOTE: not a safepoint
   OpReg(kOpBlx, r_tgt);
   RegLocation rl_result = GetReturn(kCoreReg);
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 054514e..d727615 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -273,19 +273,6 @@
   return rl_dest;
 }
 
-void MipsMir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale,
-                        int offset) {
-  LOG(FATAL) << "Unexpected use of OpLea for Arm";
-}
-
-void MipsMir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
-  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
-}
-
-void MipsMir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
-  UNIMPLEMENTED(FATAL) << "Should not be called.";
-}
-
 bool MipsMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
   DCHECK_NE(cu_->instruction_set, kThumb2);
   return false;
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 4ba94c4..bc91fbcd 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -476,17 +476,12 @@
  * ensure that all branch instructions can be restarted if
  * there is a trap in the shadow.  Allocate a temp register.
  */
-RegStorage MipsMir2Lir::LoadHelper(ThreadOffset<4> offset) {
+RegStorage MipsMir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
   // NOTE: native pointer.
-  LoadWordDisp(rs_rMIPS_SELF, offset.Int32Value(), rs_rT9);
+  LoadWordDisp(rs_rMIPS_SELF, GetThreadOffset<4>(trampoline).Int32Value(), rs_rT9);
   return rs_rT9;
 }
 
-RegStorage MipsMir2Lir::LoadHelper(ThreadOffset<8> offset) {
-  UNIMPLEMENTED(FATAL) << "Should not be called.";
-  return RegStorage::InvalidReg();
-}
-
 LIR* MipsMir2Lir::CheckSuspendUsingLoad() {
   RegStorage tmp = AllocTemp();
   // NOTE: native pointer.
@@ -503,7 +498,7 @@
   LockCallTemps();  // Using fixed registers
   RegStorage reg_ptr = TargetReg(kArg0);
   OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
-  RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pA64Load));
+  RegStorage r_tgt = LoadHelper(kQuickA64Load);
   LIR *ret = OpReg(kOpBlx, r_tgt);
   RegStorage reg_ret = RegStorage::MakeRegPair(TargetReg(kRet0), TargetReg(kRet1));
   OpRegCopyWide(r_dest, reg_ret);
@@ -525,7 +520,7 @@
   OpRegCopyWide(reg_value, temp_value);
   FreeTemp(temp_ptr);
   FreeTemp(temp_value);
-  RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pA64Store));
+  RegStorage r_tgt = LoadHelper(kQuickA64Store);
   return OpReg(kOpBlx, r_tgt);
 }
 
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 0e8188b..7178ede 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -680,41 +680,18 @@
   return store;
 }
 
-LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
-  LOG(FATAL) << "Unexpected use of OpThreadMem for MIPS";
-  return NULL;
-}
-
-LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
-  UNIMPLEMENTED(FATAL) << "Should not be called.";
-  return nullptr;
-}
-
 LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
   LOG(FATAL) << "Unexpected use of OpMem for MIPS";
   return NULL;
 }
 
-LIR* MipsMir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                       int displacement, RegStorage r_src, OpSize size) {
-  LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS";
-  return NULL;
-}
-
-LIR* MipsMir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
-  LOG(FATAL) << "Unexpected use of OpRegMem for MIPS";
-  return NULL;
-}
-
-LIR* MipsMir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                      int displacement, RegStorage r_dest, OpSize size) {
-  LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for MIPS";
-  return NULL;
-}
-
 LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
   LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
   return NULL;
 }
 
+LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+  return OpReg(op, r_tgt);
+}
+
 }  // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index f183dc9..b832223 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -28,6 +28,7 @@
 #include "driver/compiler_driver.h"
 #include "instruction_set.h"
 #include "leb128.h"
+#include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "safe_map.h"
 #include "utils/array_ref.h"
 #include "utils/arena_allocator.h"
@@ -805,7 +806,6 @@
     void MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after);
     void MarkPossibleStackOverflowException();
     void ForceImplicitNullCheck(RegStorage reg, int opt_flags);
-    LIR* GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind);
     LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
     LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
     virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
@@ -844,9 +844,7 @@
                           RegLocation rl_src, int lit);
     void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                         RegLocation rl_src1, RegLocation rl_src2);
-    template <size_t pointer_size>
-    void GenConversionCall(ThreadOffset<pointer_size> func_offset, RegLocation rl_dest,
-                           RegLocation rl_src);
+    void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src);
     virtual void GenSuspendTest(int opt_flags);
     virtual void GenSuspendTestAndBranch(int opt_flags, LIR* target);
 
@@ -856,66 +854,44 @@
                        RegLocation rl_src1, RegLocation rl_src2);
 
     // Shared by all targets - implemented in gen_invoke.cc.
-    template <size_t pointer_size>
-    LIR* CallHelper(RegStorage r_tgt, ThreadOffset<pointer_size> helper_offset, bool safepoint_pc,
+    LIR* CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
                     bool use_link = true);
-    RegStorage CallHelperSetup(ThreadOffset<4> helper_offset);
-    RegStorage CallHelperSetup(ThreadOffset<8> helper_offset);
-    template <size_t pointer_size>
-    void CallRuntimeHelper(ThreadOffset<pointer_size> helper_offset, bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0, bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_offset, RegLocation arg0,
+    RegStorage CallHelperSetup(QuickEntrypointEnum trampoline);
+
+    void CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc);
+    void CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
+    void CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0, bool safepoint_pc);
+    void CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
                                       bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperImmImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg1,
+    void CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
                                  bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
-                                         RegLocation arg1, bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperRegLocationImm(ThreadOffset<pointer_size> helper_offset, RegLocation arg0,
-                                         int arg1, bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, int arg0, RegStorage arg1,
+    void CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0, RegLocation arg1,
+                                         bool safepoint_pc);
+    void CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0, int arg1,
+                                         bool safepoint_pc);
+    void CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
                                  bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, int arg1,
+    void CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
                                  bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperImmMethod(ThreadOffset<pointer_size> helper_offset, int arg0,
+    void CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
+    void CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
                                     bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
-                                    bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
-                                               RegStorage arg0, RegLocation arg2, bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
-                                                 RegLocation arg0, RegLocation arg1,
-                                                 bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperRegReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
-                                 RegStorage arg1, bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperRegRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
-                                    RegStorage arg1, int arg2, bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
+    void CallRuntimeHelperRegMethodRegLocation(QuickEntrypointEnum trampoline, RegStorage arg0,
                                                RegLocation arg2, bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperImmMethodImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg2,
+    void CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
+                                                 RegLocation arg1, bool safepoint_pc);
+    void CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0, RegStorage arg1,
+                                 bool safepoint_pc);
+    void CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
+                                    RegStorage arg1, int arg2, bool safepoint_pc);
+    void CallRuntimeHelperImmMethodRegLocation(QuickEntrypointEnum trampoline, int arg0,
+                                               RegLocation arg2, bool safepoint_pc);
+    void CallRuntimeHelperImmMethodImm(QuickEntrypointEnum trampoline, int arg0, int arg2,
                                        bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
-                                                    int arg0, RegLocation arg1, RegLocation arg2,
+    void CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
+                                                    RegLocation arg1, RegLocation arg2,
                                                     bool safepoint_pc);
-    template <size_t pointer_size>
-    void CallRuntimeHelperRegLocationRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
+    void CallRuntimeHelperRegLocationRegLocationRegLocation(QuickEntrypointEnum trampoline,
                                                             RegLocation arg0, RegLocation arg1,
                                                             RegLocation arg2,
                                                             bool safepoint_pc);
@@ -954,7 +930,7 @@
     RegLocation InlineTargetWide(CallInfo* info);
 
     bool GenInlinedGet(CallInfo* info);
-    bool GenInlinedCharAt(CallInfo* info);
+    virtual bool GenInlinedCharAt(CallInfo* info);
     bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
     virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
     bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
@@ -1132,23 +1108,18 @@
     virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
     virtual LIR* CheckSuspendUsingLoad() = 0;
 
-    virtual RegStorage LoadHelper(ThreadOffset<4> offset) = 0;
-    virtual RegStorage LoadHelper(ThreadOffset<8> offset) = 0;
+    virtual RegStorage LoadHelper(QuickEntrypointEnum trampoline) = 0;
 
     virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                               OpSize size, VolatileKind is_volatile) = 0;
     virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                  int scale, OpSize size) = 0;
-    virtual LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                     int displacement, RegStorage r_dest, OpSize size) = 0;
     virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
     virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
     virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                                OpSize size, VolatileKind is_volatile) = 0;
     virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                   int scale, OpSize size) = 0;
-    virtual LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                      int displacement, RegStorage r_src, OpSize size) = 0;
     virtual void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) = 0;
 
     // Required for target - register utilities.
@@ -1399,7 +1370,6 @@
     virtual void OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
     virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0;
     virtual LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) = 0;
-    virtual LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) = 0;
     virtual LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) = 0;
 
     /**
@@ -1439,15 +1409,9 @@
     virtual LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                              RegStorage r_src2) = 0;
     virtual LIR* OpTestSuspend(LIR* target) = 0;
-    virtual LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) = 0;
-    virtual LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) = 0;
     virtual LIR* OpVldm(RegStorage r_base, int count) = 0;
     virtual LIR* OpVstm(RegStorage r_base, int count) = 0;
-    virtual void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale,
-                       int offset) = 0;
     virtual void OpRegCopyWide(RegStorage dest, RegStorage src) = 0;
-    virtual void OpTlsCmp(ThreadOffset<4> offset, int val) = 0;
-    virtual void OpTlsCmp(ThreadOffset<8> offset, int val) = 0;
     virtual bool InexpensiveConstantInt(int32_t value) = 0;
     virtual bool InexpensiveConstantFloat(int32_t value) = 0;
     virtual bool InexpensiveConstantLong(int64_t value) = 0;
@@ -1460,6 +1424,8 @@
     // Temp workaround
     void Workaround7250540(RegLocation rl_dest, RegStorage zero_reg);
 
+    virtual LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) = 0;
+
   protected:
     Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
 
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 40dd9cc..15aae9e 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -171,13 +171,7 @@
   }
   NewLIR2(kX86PcRelAdr, payload.GetReg(), WrapPointer(tab_rec));
   OpRegReg(kOpAdd, payload, method_start);
-  if (cu_->target64) {
-    CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData), array_ptr,
-                            payload, true);
-  } else {
-    CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData), array_ptr,
-                            payload, true);
-  }
+  CallRuntimeHelperRegReg(kQuickHandleFillArrayData, array_ptr, payload, true);
 }
 
 void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
@@ -261,13 +255,8 @@
         m2l_->OpRegImm(kOpAdd, rs_rX86_SP, sp_displace_);
         m2l_->ClobberCallerSave();
         // Assumes codegen and target are in thumb2 mode.
-        if (cu_->target64) {
-          m2l_->CallHelper(RegStorage::InvalidReg(), QUICK_ENTRYPOINT_OFFSET(8, pThrowStackOverflow),
-                           false /* MarkSafepointPC */, false /* UseLink */);
-        } else {
-          m2l_->CallHelper(RegStorage::InvalidReg(), QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow),
-                           false /* MarkSafepointPC */, false /* UseLink */);
-        }
+        m2l_->CallHelper(RegStorage::InvalidReg(), kQuickThrowStackOverflow,
+                         false /* MarkSafepointPC */, false /* UseLink */);
       }
 
      private:
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 49c0a03..3bc79ad 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -68,14 +68,13 @@
                           RegLocation rl_dest, int lit);
   bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
   LIR* CheckSuspendUsingLoad() OVERRIDE;
-  RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
-  RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
+  RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
   LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                     OpSize size, VolatileKind is_volatile) OVERRIDE;
   LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
                        OpSize size) OVERRIDE;
   LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                           RegStorage r_dest, OpSize size) OVERRIDE;
+                           RegStorage r_dest, OpSize size);
   LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
   LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
   LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
@@ -83,7 +82,7 @@
   LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
                         OpSize size) OVERRIDE;
   LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                            RegStorage r_src, OpSize size) OVERRIDE;
+                            RegStorage r_src, OpSize size);
   void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
   void GenImplicitNullCheck(RegStorage reg, int opt_flags);
 
@@ -179,6 +178,7 @@
   bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
   bool GenInlinedPeek(CallInfo* info, OpSize size);
   bool GenInlinedPoke(CallInfo* info, OpSize size);
+  bool GenInlinedCharAt(CallInfo* info) OVERRIDE;
   void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
   void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
   void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -300,14 +300,14 @@
   LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
   LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
   LIR* OpTestSuspend(LIR* target);
-  LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) OVERRIDE;
-  LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) OVERRIDE;
+  LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset);
+  LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset);
   LIR* OpVldm(RegStorage r_base, int count);
   LIR* OpVstm(RegStorage r_base, int count);
   void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
   void OpRegCopyWide(RegStorage dest, RegStorage src);
-  void OpTlsCmp(ThreadOffset<4> offset, int val) OVERRIDE;
-  void OpTlsCmp(ThreadOffset<8> offset, int val) OVERRIDE;
+  void OpTlsCmp(ThreadOffset<4> offset, int val);
+  void OpTlsCmp(ThreadOffset<8> offset, int val);
 
   void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
   void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
@@ -402,6 +402,8 @@
    */
   std::vector<uint8_t>* ReturnCallFrameInformation();
 
+  LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+
  protected:
   // Casting of RegStorage
   RegStorage As32BitReg(RegStorage reg) {
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 62053fd..2920fb6 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -292,7 +292,7 @@
         branch_normal->target = NewLIR0(kPseudoTargetLabel);
         StoreValueWide(rl_dest, rl_result);
       } else {
-        GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pF2l), rl_dest, rl_src);
+        GenConversionCall(kQuickF2l, rl_dest, rl_src);
       }
       return;
     case Instruction::DOUBLE_TO_LONG:
@@ -317,7 +317,7 @@
         branch_normal->target = NewLIR0(kPseudoTargetLabel);
         StoreValueWide(rl_dest, rl_result);
       } else {
-        GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pD2l), rl_dest, rl_src);
+        GenConversionCall(kQuickD2l, rl_dest, rl_src);
       }
       return;
     default:
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 724ee7e..b9abdbf 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1142,14 +1142,10 @@
         }
       }
       // Load array length to kArg1.
-      m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
-      if (cu_->target64) {
-        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
-                                      new_index, m2l_->TargetReg(kArg1, kNotWide), true);
-      } else {
-        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
-                                      new_index, m2l_->TargetReg(kArg1, kNotWide), true);
-      }
+      X86Mir2Lir* x86_m2l = static_cast<X86Mir2Lir*>(m2l_);
+      x86_m2l->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
+      x86_m2l->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, new_index,
+                                       m2l_->TargetReg(kArg1, kNotWide), true);
     }
 
    private:
@@ -1182,17 +1178,11 @@
       GenerateTargetLabel(kPseudoThrowTarget);
 
       // Load array length to kArg1.
-      m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
-      m2l_->LoadConstant(m2l_->TargetReg(kArg0, kNotWide), index_);
-      if (cu_->target64) {
-        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
-                                      m2l_->TargetReg(kArg0, kNotWide),
-                                      m2l_->TargetReg(kArg1, kNotWide), true);
-      } else {
-        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
-                                      m2l_->TargetReg(kArg0, kNotWide),
-                                      m2l_->TargetReg(kArg1, kNotWide), true);
-      }
+      X86Mir2Lir* x86_m2l = static_cast<X86Mir2Lir*>(m2l_);
+      x86_m2l->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
+      x86_m2l->LoadConstant(m2l_->TargetReg(kArg0, kNotWide), index_);
+      x86_m2l->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, m2l_->TargetReg(kArg0, kNotWide),
+                                       m2l_->TargetReg(kArg1, kNotWide), true);
     }
 
    private:
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 451ae8b..3111025 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -871,14 +871,8 @@
   return new X86Mir2Lir(cu, mir_graph, arena);
 }
 
-// Not used in x86
-RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
-  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
-  return RegStorage::InvalidReg();
-}
-
-// Not used in x86
-RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
+// Not used in x86(-64)
+RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
   LOG(FATAL) << "Unexpected use of LoadHelper in x86";
   return RegStorage::InvalidReg();
 }
@@ -2716,4 +2710,69 @@
   return call_state;
 }
 
+bool X86Mir2Lir::GenInlinedCharAt(CallInfo* info) {
+  // Location of reference to data array
+  int value_offset = mirror::String::ValueOffset().Int32Value();
+  // Location of count
+  int count_offset = mirror::String::CountOffset().Int32Value();
+  // Starting offset within data array
+  int offset_offset = mirror::String::OffsetOffset().Int32Value();
+  // Start of char data within array_
+  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
+
+  RegLocation rl_obj = info->args[0];
+  RegLocation rl_idx = info->args[1];
+  rl_obj = LoadValue(rl_obj, kRefReg);
+  // X86 wants to avoid putting a constant index into a register.
+  if (!rl_idx.is_const) {
+    rl_idx = LoadValue(rl_idx, kCoreReg);
+  }
+  RegStorage reg_max;
+  GenNullCheck(rl_obj.reg, info->opt_flags);
+  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
+  LIR* range_check_branch = nullptr;
+  RegStorage reg_off;
+  RegStorage reg_ptr;
+  if (range_check) {
+    // On x86, we can compare to memory directly.
+    // Set up a launch pad to allow retry in case of bounds violation.
+    if (rl_idx.is_const) {
+      LIR* comparison;
+      range_check_branch = OpCmpMemImmBranch(
+          kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
+          mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr, &comparison);
+      MarkPossibleNullPointerExceptionAfter(0, comparison);
+    } else {
+      OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
+      MarkPossibleNullPointerException(0);
+      range_check_branch = OpCondBranch(kCondUge, nullptr);
+    }
+  }
+  reg_off = AllocTemp();
+  reg_ptr = AllocTempRef();
+  Load32Disp(rl_obj.reg, offset_offset, reg_off);
+  LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
+  if (rl_idx.is_const) {
+    OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
+  } else {
+    OpRegReg(kOpAdd, reg_off, rl_idx.reg);
+  }
+  FreeTemp(rl_obj.reg);
+  if (rl_idx.location == kLocPhysReg) {
+    FreeTemp(rl_idx.reg);
+  }
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf);
+  FreeTemp(reg_off);
+  FreeTemp(reg_ptr);
+  StoreValue(rl_dest, rl_result);
+  if (range_check) {
+    DCHECK(range_check_branch != nullptr);
+    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
+    AddIntrinsicSlowPath(info, range_check_branch);
+  }
+  return true;
+}
+
 }  // namespace art
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index bae01d9..ccffe5b 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -1059,4 +1059,13 @@
     }
   }
 }
+
+LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+  if (cu_->target64) {
+    return OpThreadMem(op, GetThreadOffset<8>(trampoline));
+  } else {
+    return OpThreadMem(op, GetThreadOffset<4>(trampoline));
+  }
+}
+
 }  // namespace art
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 473687c..8c108a8 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -37,111 +37,11 @@
 
 // Pointers to functions that are called by quick compiler generated code via thread-local storage.
 struct PACKED(4) QuickEntryPoints {
-  // Alloc
-  void* (*pAllocArray)(uint32_t, void*, int32_t);
-  void* (*pAllocArrayResolved)(void*, void*, int32_t);
-  void* (*pAllocArrayWithAccessCheck)(uint32_t, void*, int32_t);
-  void* (*pAllocObject)(uint32_t, void*);
-  void* (*pAllocObjectResolved)(void*, void*);
-  void* (*pAllocObjectInitialized)(void*, void*);
-  void* (*pAllocObjectWithAccessCheck)(uint32_t, void*);
-  void* (*pCheckAndAllocArray)(uint32_t, void*, int32_t);
-  void* (*pCheckAndAllocArrayWithAccessCheck)(uint32_t, void*, int32_t);
-
-  // Cast
-  uint32_t (*pInstanceofNonTrivial)(const mirror::Class*, const mirror::Class*);
-  void (*pCheckCast)(void*, void*);
-
-  // DexCache
-  void* (*pInitializeStaticStorage)(uint32_t, void*);
-  void* (*pInitializeTypeAndVerifyAccess)(uint32_t, void*);
-  void* (*pInitializeType)(uint32_t, void*);
-  void* (*pResolveString)(void*, uint32_t);
-
-  // Field
-  int (*pSet32Instance)(uint32_t, void*, int32_t);  // field_idx, obj, src
-  int (*pSet32Static)(uint32_t, int32_t);
-  int (*pSet64Instance)(uint32_t, void*, int64_t);
-  int (*pSet64Static)(uint32_t, int64_t);
-  int (*pSetObjInstance)(uint32_t, void*, void*);
-  int (*pSetObjStatic)(uint32_t, void*);
-  int32_t (*pGet32Instance)(uint32_t, void*);
-  int32_t (*pGet32Static)(uint32_t);
-  int64_t (*pGet64Instance)(uint32_t, void*);
-  int64_t (*pGet64Static)(uint32_t);
-  void* (*pGetObjInstance)(uint32_t, void*);
-  void* (*pGetObjStatic)(uint32_t);
-
-  // Array
-  void (*pAputObjectWithNullAndBoundCheck)(void*, uint32_t, void*);  // array, index, src
-  void (*pAputObjectWithBoundCheck)(void*, uint32_t, void*);  // array, index, src
-  void (*pAputObject)(void*, uint32_t, void*);  // array, index, src
-  void (*pHandleFillArrayData)(void*, void*);
-
-  // JNI
-  uint32_t (*pJniMethodStart)(Thread*);
-  uint32_t (*pJniMethodStartSynchronized)(jobject to_lock, Thread* self);
-  void (*pJniMethodEnd)(uint32_t cookie, Thread* self);
-  void (*pJniMethodEndSynchronized)(uint32_t cookie, jobject locked, Thread* self);
-  mirror::Object* (*pJniMethodEndWithReference)(jobject result, uint32_t cookie, Thread* self);
-  mirror::Object* (*pJniMethodEndWithReferenceSynchronized)(jobject result, uint32_t cookie,
-                                                    jobject locked, Thread* self);
-  void (*pQuickGenericJniTrampoline)(mirror::ArtMethod*);
-
-  // Locks
-  void (*pLockObject)(void*);
-  void (*pUnlockObject)(void*);
-
-  // Math
-  int32_t (*pCmpgDouble)(double, double);
-  int32_t (*pCmpgFloat)(float, float);
-  int32_t (*pCmplDouble)(double, double);
-  int32_t (*pCmplFloat)(float, float);
-  double (*pFmod)(double, double);
-  double (*pL2d)(int64_t);
-  float (*pFmodf)(float, float);
-  float (*pL2f)(int64_t);
-  int32_t (*pD2iz)(double);
-  int32_t (*pF2iz)(float);
-  int32_t (*pIdivmod)(int32_t, int32_t);
-  int64_t (*pD2l)(double);
-  int64_t (*pF2l)(float);
-  int64_t (*pLdiv)(int64_t, int64_t);
-  int64_t (*pLmod)(int64_t, int64_t);
-  int64_t (*pLmul)(int64_t, int64_t);
-  uint64_t (*pShlLong)(uint64_t, uint32_t);
-  uint64_t (*pShrLong)(uint64_t, uint32_t);
-  uint64_t (*pUshrLong)(uint64_t, uint32_t);
-
-  // Intrinsics
-  int32_t (*pIndexOf)(void*, uint32_t, uint32_t, uint32_t);
-  int32_t (*pStringCompareTo)(void*, void*);
-  void* (*pMemcpy)(void*, const void*, size_t);
-
-  // Invocation
-  void (*pQuickImtConflictTrampoline)(mirror::ArtMethod*);
-  void (*pQuickResolutionTrampoline)(mirror::ArtMethod*);
-  void (*pQuickToInterpreterBridge)(mirror::ArtMethod*);
-  void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*);
-  void (*pInvokeInterfaceTrampolineWithAccessCheck)(uint32_t, void*);
-  void (*pInvokeStaticTrampolineWithAccessCheck)(uint32_t, void*);
-  void (*pInvokeSuperTrampolineWithAccessCheck)(uint32_t, void*);
-  void (*pInvokeVirtualTrampolineWithAccessCheck)(uint32_t, void*);
-
-  // Thread
-  void (*pTestSuspend)();  // Stub that is periodically called to test the suspend count
-
-  // Throws
-  void (*pDeliverException)(void*);
-  void (*pThrowArrayBounds)(int32_t, int32_t);
-  void (*pThrowDivZero)();
-  void (*pThrowNoSuchMethod)(int32_t);
-  void (*pThrowNullPointer)();
-  void (*pThrowStackOverflow)(void*);
-
-  // Atomic 64-bit load/store
-  int64_t (*pA64Load)(volatile const int64_t *);
-  void (*pA64Store)(volatile int64_t *, int64_t);
+#define ENTRYPOINT_ENUM(name, rettype, ...) rettype ( * p ## name )( __VA_ARGS__ );
+#include "quick_entrypoints_list.h"
+  QUICK_ENTRYPOINT_LIST(ENTRYPOINT_ENUM)
+#undef QUICK_ENTRYPOINT_LIST
+#undef ENTRYPOINT_ENUM
 };
 
 
diff --git a/runtime/entrypoints/quick/quick_entrypoints_enum.h b/runtime/entrypoints/quick/quick_entrypoints_enum.h
new file mode 100644
index 0000000..84158cd
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_entrypoints_enum.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_ENUM_H_
+#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_ENUM_H_
+
+#include "quick_entrypoints.h"
+#include "thread.h"
+
+namespace art {
+
+// Define an enum for the entrypoints. Names are prefixed with 'kQuick'.
+enum QuickEntrypointEnum
+{  // NOLINT(whitespace/braces)
+#define ENTRYPOINT_ENUM(name, rettype, ...) kQuick ## name,
+#include "quick_entrypoints_list.h"
+  QUICK_ENTRYPOINT_LIST(ENTRYPOINT_ENUM)
+#undef QUICK_ENTRYPOINT_LIST
+#undef ENTRYPOINT_ENUM
+};
+
+std::ostream& operator<<(std::ostream& os, const QuickEntrypointEnum& kind);
+
+// Translate a QuickEntrypointEnum value to the corresponding ThreadOffset.
+template <size_t pointer_size>
+static ThreadOffset<pointer_size> GetThreadOffset(QuickEntrypointEnum trampoline) {
+  switch (trampoline)
+  {  // NOLINT(whitespace/braces)
+  #define ENTRYPOINT_ENUM(name, rettype, ...) case kQuick ## name : \
+      return QUICK_ENTRYPOINT_OFFSET(pointer_size, p ## name);
+  #include "quick_entrypoints_list.h"
+    QUICK_ENTRYPOINT_LIST(ENTRYPOINT_ENUM)
+  #undef QUICK_ENTRYPOINT_LIST
+  #undef ENTRYPOINT_ENUM
+  };
+  LOG(FATAL) << "Unexpected trampoline " << static_cast<int>(trampoline);
+  return ThreadOffset<pointer_size>(-1);
+}
+
+}  // namespace art
+
+
+#endif  // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_ENUM_H_
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
new file mode 100644
index 0000000..f858743
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_
+#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_
+
+// All quick entrypoints. Format is name, return type, argument types.
+
+#define QUICK_ENTRYPOINT_LIST(V) \
+  V(AllocArray, void*, uint32_t, void*, int32_t) \
+  V(AllocArrayResolved, void*, void*, void*, int32_t) \
+  V(AllocArrayWithAccessCheck, void*, uint32_t, void*, int32_t) \
+  V(AllocObject, void*, uint32_t, void*) \
+  V(AllocObjectResolved, void*, void*, void*) \
+  V(AllocObjectInitialized, void*, void*, void*) \
+  V(AllocObjectWithAccessCheck, void*, uint32_t, void*) \
+  V(CheckAndAllocArray, void*, uint32_t, void*, int32_t) \
+  V(CheckAndAllocArrayWithAccessCheck, void*, uint32_t, void*, int32_t) \
+\
+  V(InstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*) \
+  V(CheckCast, void , void*, void*) \
+\
+  V(InitializeStaticStorage, void*, uint32_t, void*) \
+  V(InitializeTypeAndVerifyAccess, void*, uint32_t, void*) \
+  V(InitializeType, void*, uint32_t, void*) \
+  V(ResolveString, void*, void*, uint32_t) \
+\
+  V(Set32Instance, int, uint32_t, void*, int32_t) \
+  V(Set32Static, int, uint32_t, int32_t) \
+  V(Set64Instance, int, uint32_t, void*, int64_t) \
+  V(Set64Static, int, uint32_t, int64_t) \
+  V(SetObjInstance, int, uint32_t, void*, void*) \
+  V(SetObjStatic, int, uint32_t, void*) \
+  V(Get32Instance, int32_t, uint32_t, void*) \
+  V(Get32Static, int32_t, uint32_t) \
+  V(Get64Instance, int64_t, uint32_t, void*) \
+  V(Get64Static, int64_t, uint32_t) \
+  V(GetObjInstance, void*, uint32_t, void*) \
+  V(GetObjStatic, void*, uint32_t) \
+\
+  V(AputObjectWithNullAndBoundCheck, void, void*, uint32_t, void*) \
+  V(AputObjectWithBoundCheck, void, void*, uint32_t, void*) \
+  V(AputObject, void, void*, uint32_t, void*) \
+  V(HandleFillArrayData, void, void*, void*) \
+\
+  V(JniMethodStart, uint32_t, Thread*) \
+  V(JniMethodStartSynchronized, uint32_t, jobject to_lock, Thread* self) \
+  V(JniMethodEnd, void, uint32_t cookie, Thread* self) \
+  V(JniMethodEndSynchronized, void, uint32_t cookie, jobject locked, Thread* self) \
+  V(JniMethodEndWithReference, mirror::Object*, jobject result, uint32_t cookie, Thread* self) \
+  V(JniMethodEndWithReferenceSynchronized, mirror::Object*, jobject result, uint32_t cookie, jobject locked, Thread* self) \
+  V(QuickGenericJniTrampoline, void, mirror::ArtMethod*) \
+\
+  V(LockObject, void, void*) \
+  V(UnlockObject, void, void*) \
+\
+  V(CmpgDouble, int32_t, double, double) \
+  V(CmpgFloat, int32_t, float, float) \
+  V(CmplDouble, int32_t, double, double) \
+  V(CmplFloat, int32_t, float, float) \
+  V(Fmod, double, double, double) \
+  V(L2d, double, int64_t) \
+  V(Fmodf, float, float, float) \
+  V(L2f, float, int64_t) \
+  V(D2iz, int32_t, double) \
+  V(F2iz, int32_t, float) \
+  V(Idivmod, int32_t, int32_t, int32_t) \
+  V(D2l, int64_t, double) \
+  V(F2l, int64_t, float) \
+  V(Ldiv, int64_t, int64_t, int64_t) \
+  V(Lmod, int64_t, int64_t, int64_t) \
+  V(Lmul, int64_t, int64_t, int64_t) \
+  V(ShlLong, uint64_t, uint64_t, uint32_t) \
+  V(ShrLong, uint64_t, uint64_t, uint32_t) \
+  V(UshrLong, uint64_t, uint64_t, uint32_t) \
+\
+  V(IndexOf, int32_t, void*, uint32_t, uint32_t, uint32_t) \
+  V(StringCompareTo, int32_t, void*, void*) \
+  V(Memcpy, void*, void*, const void*, size_t) \
+\
+  V(QuickImtConflictTrampoline, void, mirror::ArtMethod*) \
+  V(QuickResolutionTrampoline, void, mirror::ArtMethod*) \
+  V(QuickToInterpreterBridge, void, mirror::ArtMethod*) \
+  V(InvokeDirectTrampolineWithAccessCheck, void, uint32_t, void*) \
+  V(InvokeInterfaceTrampolineWithAccessCheck, void, uint32_t, void*) \
+  V(InvokeStaticTrampolineWithAccessCheck, void, uint32_t, void*) \
+  V(InvokeSuperTrampolineWithAccessCheck, void, uint32_t, void*) \
+  V(InvokeVirtualTrampolineWithAccessCheck, void, uint32_t, void*) \
+\
+  V(TestSuspend, void, void) \
+\
+  V(DeliverException, void, void*) \
+  V(ThrowArrayBounds, void, int32_t, int32_t) \
+  V(ThrowDivZero, void, void) \
+  V(ThrowNoSuchMethod, void, int32_t) \
+  V(ThrowNullPointer, void, void) \
+  V(ThrowStackOverflow, void, void*) \
+\
+  V(A64Load, int64_t, volatile const int64_t *) \
+  V(A64Store, void, volatile int64_t *, int64_t)
+
+
+#endif  // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_
+#undef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_   // #define is only for lint.