Make ART compile with GCC -O0 again.

Tidy up InstructionSetFeatures so that it has a type hierarchy dependent on
architecture.
Add checks to instruction_set_test that warn when the runtime's InstructionSetFeatures
don't agree with those derived from system properties, AT_HWCAP and /proc/cpuinfo.
Clean up the class linker entry point logic so that, instead of returning entry points,
it tests whether the passed code is a particular entrypoint. This works around
image trampolines that replicate entrypoints.

Bug: 17993736
Change-Id: I5f4b49e88c3b02a79f9bee04f83395146ed7be23
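
For orientation, here is a minimal, self-contained sketch of the architecture-dependent
type hierarchy this change introduces. It is an illustration, not ART's actual classes:
only the method names (Equals, GetFeatureString, AsArmInstructionSetFeatures,
HasDivideInstruction, HasLpae) mirror calls visible in the diff below; everything else
is a stand-in.

```cpp
// Simplified illustration of an architecture-dependent feature hierarchy.
// Not the real ART implementation; only the method names mirror the diff.
#include <iostream>
#include <memory>
#include <string>

class ArmInstructionSetFeatures;  // Forward declaration for the down-cast helper.

class InstructionSetFeatures {
 public:
  virtual ~InstructionSetFeatures() {}
  // Compare against another feature set, e.g. dex2oat's vs. the runtime's.
  virtual bool Equals(const InstructionSetFeatures* other) const = 0;
  // Human-readable feature list, as printed by oatdump.
  virtual std::string GetFeatureString() const = 0;
  // Down-cast used by ARM-specific code paths in the compiler back end.
  virtual const ArmInstructionSetFeatures* AsArmInstructionSetFeatures() const { return nullptr; }
};

class ArmInstructionSetFeatures : public InstructionSetFeatures {
 public:
  ArmInstructionSetFeatures(bool has_div, bool has_lpae)
      : has_div_(has_div), has_lpae_(has_lpae) {}
  bool HasDivideInstruction() const { return has_div_; }
  bool HasLpae() const { return has_lpae_; }
  bool Equals(const InstructionSetFeatures* other) const override {
    const ArmInstructionSetFeatures* arm = other->AsArmInstructionSetFeatures();
    return arm != nullptr && arm->has_div_ == has_div_ && arm->has_lpae_ == has_lpae_;
  }
  std::string GetFeatureString() const override {
    return std::string(has_div_ ? "div" : "nodiv") + "," + (has_lpae_ ? "lpae" : "nolpae");
  }
  const ArmInstructionSetFeatures* AsArmInstructionSetFeatures() const override { return this; }

 private:
  const bool has_div_;
  const bool has_lpae_;
};

int main() {
  // Callers hold the base type and down-cast only where ARM specifics matter,
  // mirroring GetInstructionSetFeatures()->AsArmInstructionSetFeatures()->HasDivideInstruction().
  std::unique_ptr<const InstructionSetFeatures> build(new ArmInstructionSetFeatures(true, true));
  std::unique_ptr<const InstructionSetFeatures> runtime(new ArmInstructionSetFeatures(true, false));
  std::cout << build->GetFeatureString() << "\n";
  if (!build->Equals(runtime.get())) {
    std::cout << "Mismatch between build-time and runtime features\n";
  }
  return 0;
}
```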
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 14edb71..ef97d03 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -93,8 +93,8 @@
 #
 # Used to change the default GC. Valid values are CMS, SS, GSS. The default is CMS.
 #
-ART_DEFAULT_GC_TYPE ?= CMS
-ART_DEFAULT_GC_TYPE_CFLAGS := -DART_DEFAULT_GC_TYPE_IS_$(ART_DEFAULT_GC_TYPE)
+art_default_gc_type ?= CMS
+art_default_gc_type_cflags := -DART_DEFAULT_GC_TYPE_IS_$(art_default_gc_type)
 
 ifeq ($(ART_USE_PORTABLE_COMPILER),true)
   LLVM_ROOT_PATH := external/llvm
@@ -131,30 +131,6 @@
       endif)
 endef
 
-ART_CPP_EXTENSION := .cc
-
-ART_C_INCLUDES := \
-  external/gtest/include \
-  external/valgrind/main/include \
-  external/valgrind/main \
-  external/vixl/src \
-  external/zlib \
-  frameworks/compile/mclinker/include
-
-art_cflags := \
-  -fno-rtti \
-  -std=gnu++11 \
-  -ggdb3 \
-  -Wall \
-  -Werror \
-  -Wextra \
-  -Wno-sign-promo \
-  -Wno-unused-parameter \
-  -Wstrict-aliasing \
-  -fstrict-aliasing \
-  -Wunreachable-code \
-  -fvisibility=protected
-
 ART_TARGET_CLANG_CFLAGS :=
 ART_TARGET_CLANG_CFLAGS_arm :=
 ART_TARGET_CLANG_CFLAGS_arm64 :=
@@ -168,6 +144,58 @@
   -DNVALGRIND \
   -Wno-unused-value
 
+# FIXME: upstream LLVM has a vectorizer bug that needs to be fixed
+ART_TARGET_CLANG_CFLAGS_arm64 += \
+  -fno-vectorize
+
+# Colorize clang compiler warnings.
+art_clang_cflags := -fcolor-diagnostics
+
+# Warn about thread safety violations with clang.
+art_clang_cflags += -Wthread-safety
+
+# Warn if switch fallthroughs aren't annotated.
+art_clang_cflags += -Wimplicit-fallthrough
+
+# Enable float equality warnings.
+art_clang_cflags += -Wfloat-equal
+
+ifeq ($(ART_HOST_CLANG),true)
+  ART_HOST_CFLAGS += $(art_clang_cflags)
+endif
+ifeq ($(ART_TARGET_CLANG),true)
+  ART_TARGET_CFLAGS += $(art_clang_cflags)
+endif
+
+# Clear local variable now its use has ended.
+art_clang_cflags :=
+
+ART_CPP_EXTENSION := .cc
+
+ART_C_INCLUDES := \
+  external/gtest/include \
+  external/valgrind/main/include \
+  external/valgrind/main \
+  external/vixl/src \
+  external/zlib \
+  frameworks/compile/mclinker/include
+
+# Base set of cflags used by all things ART.
+art_cflags := \
+  -fno-rtti \
+  -std=gnu++11 \
+  -ggdb3 \
+  -Wall \
+  -Werror \
+  -Wextra \
+  -Wno-sign-promo \
+  -Wno-unused-parameter \
+  -Wstrict-aliasing \
+  -fstrict-aliasing \
+  -Wunreachable-code \
+  -fvisibility=protected \
+  $(art_default_gc_type_cflags)
+
 ifeq ($(ART_SMALL_MODE),true)
   art_cflags += -DART_SMALL_MODE=1
 endif
@@ -176,14 +204,19 @@
   art_cflags += -DART_SEA_IR_MODE=1
 endif
 
+# Cflags for non-debug ART and ART tools.
 art_non_debug_cflags := \
   -O3
 
-art_host_non_debug_cflags := \
-  $(art_non_debug_cflags)
+# Cflags for debug ART and ART tools.
+art_debug_cflags := \
+  -O2 \
+  -DDYNAMIC_ANNOTATIONS_ENABLED=1 \
+  -UNDEBUG \
+  -fkeep-inline-functions
 
-art_target_non_debug_cflags := \
-  $(art_non_debug_cflags)
+art_host_non_debug_cflags := $(art_non_debug_cflags)
+art_target_non_debug_cflags := $(art_non_debug_cflags)
 
 ifeq ($(HOST_OS),linux)
   # Larger frame-size for host clang builds today
@@ -191,27 +224,22 @@
   art_target_non_debug_cflags += -Wframe-larger-than=1728
 endif
 
-# FIXME: upstream LLVM has a vectorizer bug that needs to be fixed
-ART_TARGET_CLANG_CFLAGS_arm64 += \
-  -fno-vectorize
-
-art_debug_cflags := \
-  -O2 \
-  -DDYNAMIC_ANNOTATIONS_ENABLED=1 \
-  -UNDEBUG
-
 ifndef LIBART_IMG_HOST_BASE_ADDRESS
   $(error LIBART_IMG_HOST_BASE_ADDRESS unset)
 endif
 ART_HOST_CFLAGS := $(art_cflags) -DANDROID_SMP=1 -DART_BASE_ADDRESS=$(LIBART_IMG_HOST_BASE_ADDRESS)
 ART_HOST_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES=default
-ART_HOST_CFLAGS += $(ART_DEFAULT_GC_TYPE_CFLAGS)
 
 ifndef LIBART_IMG_TARGET_BASE_ADDRESS
   $(error LIBART_IMG_TARGET_BASE_ADDRESS unset)
 endif
 ART_TARGET_CFLAGS := $(art_cflags) -DART_TARGET -DART_BASE_ADDRESS=$(LIBART_IMG_TARGET_BASE_ADDRESS)
 
+ART_HOST_NON_DEBUG_CFLAGS := $(art_host_non_debug_cflags)
+ART_TARGET_NON_DEBUG_CFLAGS := $(art_target_non_debug_cflags)
+ART_HOST_DEBUG_CFLAGS := $(art_debug_cflags)
+ART_TARGET_DEBUG_CFLAGS := $(art_debug_cflags)
+
 ifndef LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA
   LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA=-0x1000000
 endif
@@ -230,25 +258,6 @@
 ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MIN_DELTA=$(LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA)
 ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MAX_DELTA=$(LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA)
 
-# Colorize clang compiler warnings.
-art_clang_cflags := -fcolor-diagnostics
-
-# Warn if switch fallthroughs aren't annotated.
-art_clang_cflags += -Wimplicit-fallthrough
-
-# Enable float equality warnings.
-art_clang_cflags += -Wfloat-equal
-
-ifeq ($(ART_HOST_CLANG),true)
-  ART_HOST_CFLAGS += $(art_clang_cflags)
-endif
-ifeq ($(ART_TARGET_CLANG),true)
-  ART_TARGET_CFLAGS += $(art_clang_cflags)
-endif
-
-art_clang_cflags :=
-
-ART_TARGET_LDFLAGS :=
 ifeq ($(TARGET_CPU_SMP),true)
   ART_TARGET_CFLAGS += -DANDROID_SMP=1
 else
@@ -260,60 +269,26 @@
     ART_TARGET_CFLAGS += -DANDROID_SMP=1
   endif
 endif
-ART_TARGET_CFLAGS += $(ART_DEFAULT_GC_TYPE_CFLAGS)
-
-# DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES is set in ../build/core/dex_preopt.mk based on
-# the TARGET_CPU_VARIANT
-ifeq ($(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES),)
-$(error Required DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES is not set)
-endif
-ART_TARGET_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES=$(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES)
-
-# Enable thread-safety for GCC 4.6, and clang, but not for GCC 4.7 or later where this feature was
-# removed. Warn when -Wthread-safety is not used.
-ifneq ($(filter 4.6 4.6.%, $(TARGET_GCC_VERSION)),)
-  ART_TARGET_CFLAGS += -Wthread-safety
-else
-  # FIXME: add -Wthread-safety when the problem is fixed
-  ifeq ($(ART_TARGET_CLANG),true)
-    ART_TARGET_CFLAGS +=
-  else
-    # Warn if -Wthread-safety is not supported and not doing a top-level or 'mma' build.
-    ifneq ($(ONE_SHOT_MAKEFILE),)
-      # Enable target GCC 4.6 with: export TARGET_GCC_VERSION_EXP=4.6
-      $(info Using target GCC $(TARGET_GCC_VERSION) disables thread-safety checks.)
-    endif
-  endif
-endif
-# We compile with GCC 4.6 or clang on the host, both of which support -Wthread-safety.
-ART_HOST_CFLAGS += -Wthread-safety
 
 # To use oprofile_android --callgraph, uncomment this and recompile with "mmm art -B -j16"
 # ART_TARGET_CFLAGS += -fno-omit-frame-pointer -marm -mapcs
 
-# Addition CPU specific CFLAGS.
-ifeq ($(TARGET_ARCH),arm)
-  ifneq ($(filter cortex-a15, $(TARGET_CPU_VARIANT)),)
-    # Fake a ARM feature for LPAE support.
-    ART_TARGET_CFLAGS += -D__ARM_FEATURE_LPAE=1
-  endif
+# Clear locals now they've served their purpose.
+art_cflags :=
+art_debug_cflags :=
+art_non_debug_cflags :=
+art_host_non_debug_cflags :=
+art_target_non_debug_cflags :=
+art_default_gc_type :=
+art_default_gc_type_cflags :=
+
+ART_HOST_LDLIBS :=
+ifneq ($(ART_HOST_CLANG),true)
+  # GCC lacks libc++ assumed atomic operations, grab via libatomic.
+  ART_HOST_LDLIBS += -latomic
 endif
 
-ART_HOST_NON_DEBUG_CFLAGS := $(art_host_non_debug_cflags)
-ART_TARGET_NON_DEBUG_CFLAGS := $(art_target_non_debug_cflags)
-
-# TODO: move -fkeep-inline-functions to art_debug_cflags when target gcc > 4.4 (and -lsupc++)
-ART_HOST_DEBUG_CFLAGS := $(art_debug_cflags) -fkeep-inline-functions
-ART_HOST_DEBUG_LDLIBS := -lsupc++
-
-ifneq ($(HOST_OS),linux)
-  # Some Mac OS pthread header files are broken with -fkeep-inline-functions.
-  ART_HOST_DEBUG_CFLAGS := $(filter-out -fkeep-inline-functions,$(ART_HOST_DEBUG_CFLAGS))
-  # Mac OS doesn't have libsupc++.
-  ART_HOST_DEBUG_LDLIBS := $(filter-out -lsupc++,$(ART_HOST_DEBUG_LDLIBS))
-endif
-
-ART_TARGET_DEBUG_CFLAGS := $(art_debug_cflags)
+ART_TARGET_LDFLAGS :=
 
 # $(1): ndebug_or_debug
 define set-target-local-cflags-vars
@@ -337,6 +312,7 @@
   art_target_cflags_ndebug_or_debug :=
 endef
 
+# Support for disabling certain builds.
 ART_BUILD_TARGET := false
 ART_BUILD_HOST := false
 ART_BUILD_NDEBUG := false
@@ -358,12 +334,4 @@
   ART_BUILD_DEBUG := true
 endif
 
-# Clear locally defined variables that aren't necessary in the rest of the build system.
-ART_DEFAULT_GC_TYPE :=
-ART_DEFAULT_GC_TYPE_CFLAGS :=
-art_cflags :=
-art_target_non_debug_cflags :=
-art_host_non_debug_cflags :=
-art_non_debug_cflags :=
-
 endif # ANDROID_COMMON_BUILD_MK
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index 02252ab..81f3297 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -76,6 +76,7 @@
     LOCAL_SHARED_LIBRARIES += libdl
   else # host
     LOCAL_CLANG := $(ART_HOST_CLANG)
+    LOCAL_LDLIBS := $(ART_HOST_LDLIBS)
     LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
     ifeq ($$(art_ndebug_or_debug),debug)
       LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 3e100e9..38d3f1c 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -217,10 +217,7 @@
 LOCAL_C_INCLUDES := $(ART_C_INCLUDES) art/runtime art/compiler
 LOCAL_SHARED_LIBRARIES := libartd libartd-compiler
 LOCAL_STATIC_LIBRARIES := libcutils
-ifneq ($(WITHOUT_HOST_CLANG),true)
-  # GCC host compiled tests fail with this linked, presumably due to destructors that run.
-  LOCAL_STATIC_LIBRARIES += libgtest_libc++_host
-endif
+LOCAL_STATIC_LIBRARIES += libgtest_libc++_host
 LOCAL_LDLIBS += -ldl -lpthread
 LOCAL_MULTILIB := both
 LOCAL_CLANG := $(ART_HOST_CLANG)
@@ -264,7 +261,7 @@
 	  && (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID /tmp/ \
 	      && $$(call ART_TEST_PASSED,$$@)) \
 	  || $$(call ART_TEST_FAILED,$$@))
-	$(hide) rm /tmp/$$@-$$$$PPID
+	$(hide) rm -f /tmp/$$@-$$$$PPID
 
   ART_TEST_TARGET_GTEST$($(2)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += $$(gtest_rule)
   ART_TEST_TARGET_GTEST_RULES += $$(gtest_rule)
@@ -377,7 +374,7 @@
     LOCAL_CFLAGS += $$(ART_HOST_CFLAGS) $$(ART_HOST_DEBUG_CFLAGS)
     LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libz-host
     LOCAL_STATIC_LIBRARIES += libcutils libvixl
-    LOCAL_LDLIBS += -lpthread -ldl
+    LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -lpthread -ldl
     LOCAL_IS_HOST_MODULE := true
     LOCAL_MULTILIB := both
     LOCAL_MODULE_STEM_32 := $$(art_gtest_name)32
diff --git a/compiler/Android.mk b/compiler/Android.mk
index edc5bd0..f413576 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -223,6 +223,7 @@
   else # host
     LOCAL_CLANG := $(ART_HOST_CLANG)
     LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
+    LOCAL_LDLIBS := $(ART_HOST_LDLIBS)
     ifeq ($$(art_ndebug_or_debug),debug)
       LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
     else
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index d1d47fb..9a5f74d 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -16,18 +16,12 @@
 
 #include "common_compiler_test.h"
 
-#if defined(__arm__)
-#include <sys/ucontext.h>
-#endif
-#include <fstream>
-
 #include "class_linker.h"
 #include "compiled_method.h"
 #include "dex/quick_compiler_callbacks.h"
 #include "dex/verification_results.h"
 #include "dex/quick/dex_file_to_method_inliner_map.h"
 #include "driver/compiler_driver.h"
-#include "entrypoints/entrypoint_utils.h"
 #include "interpreter/interpreter.h"
 #include "mirror/art_method.h"
 #include "mirror/dex_cache.h"
@@ -38,128 +32,9 @@
 
 namespace art {
 
-// Normally the ClassLinker supplies this.
-extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
-
-#if defined(__arm__)
-// A signal handler called when have an illegal instruction.  We record the fact in
-// a global boolean and then increment the PC in the signal context to return to
-// the next instruction.  We know the instruction is an sdiv (4 bytes long).
-static void baddivideinst(int signo, siginfo *si, void *data) {
-  UNUSED(signo);
-  UNUSED(si);
-  struct ucontext *uc = (struct ucontext *)data;
-  struct sigcontext *sc = &uc->uc_mcontext;
-  sc->arm_r0 = 0;     // set R0 to #0 to signal error
-  sc->arm_pc += 4;    // skip offending instruction
-}
-
-// This is in arch/arm/arm_sdiv.S.  It does the following:
-// mov r1,#1
-// sdiv r0,r1,r1
-// bx lr
-//
-// the result will be the value 1 if sdiv is supported.  If it is not supported
-// a SIGILL signal will be raised and the signal handler (baddivideinst) called.
-// The signal handler sets r0 to #0 and then increments pc beyond the failed instruction.
-// Thus if the instruction is not supported, the result of this function will be #0
-
-extern "C" bool CheckForARMSDIVInstruction();
-
-static InstructionSetFeatures GuessInstructionFeatures() {
-  InstructionSetFeatures f;
-
-  // Uncomment this for processing of /proc/cpuinfo.
-  if (false) {
-    // Look in /proc/cpuinfo for features we need.  Only use this when we can guarantee that
-    // the kernel puts the appropriate feature flags in here.  Sometimes it doesn't.
-    std::ifstream in("/proc/cpuinfo");
-    if (in) {
-      while (!in.eof()) {
-        std::string line;
-        std::getline(in, line);
-        if (!in.eof()) {
-          if (line.find("Features") != std::string::npos) {
-            if (line.find("idivt") != std::string::npos) {
-              f.SetHasDivideInstruction(true);
-            }
-          }
-        }
-        in.close();
-      }
-    } else {
-      LOG(INFO) << "Failed to open /proc/cpuinfo";
-    }
-  }
-
-  // See if have a sdiv instruction.  Register a signal handler and try to execute
-  // an sdiv instruction.  If we get a SIGILL then it's not supported.  We can't use
-  // the /proc/cpuinfo method for this because Krait devices don't always put the idivt
-  // feature in the list.
-  struct sigaction sa, osa;
-  sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
-  sa.sa_sigaction = baddivideinst;
-  sigaction(SIGILL, &sa, &osa);
-
-  if (CheckForARMSDIVInstruction()) {
-    f.SetHasDivideInstruction(true);
-  }
-
-  // Restore the signal handler.
-  sigaction(SIGILL, &osa, nullptr);
-
-  // Other feature guesses in here.
-  return f;
-}
-#endif
-
-// Given a set of instruction features from the build, parse it.  The
-// input 'str' is a comma separated list of feature names.  Parse it and
-// return the InstructionSetFeatures object.
-static InstructionSetFeatures ParseFeatureList(std::string str) {
-  InstructionSetFeatures result;
-  typedef std::vector<std::string> FeatureList;
-  FeatureList features;
-  Split(str, ',', features);
-  for (FeatureList::iterator i = features.begin(); i != features.end(); i++) {
-    std::string feature = Trim(*i);
-    if (feature == "default") {
-      // Nothing to do.
-    } else if (feature == "div") {
-      // Supports divide instruction.
-      result.SetHasDivideInstruction(true);
-    } else if (feature == "nodiv") {
-      // Turn off support for divide instruction.
-      result.SetHasDivideInstruction(false);
-    } else {
-      LOG(FATAL) << "Unknown instruction set feature: '" << feature << "'";
-    }
-  }
-  // Others...
-  return result;
-}
-
 CommonCompilerTest::CommonCompilerTest() {}
 CommonCompilerTest::~CommonCompilerTest() {}
 
-OatFile::OatMethod CommonCompilerTest::CreateOatMethod(const void* code, const uint8_t* gc_map) {
-  CHECK(code != nullptr);
-  const uint8_t* base;
-  uint32_t code_offset, gc_map_offset;
-  if (gc_map == nullptr) {
-    base = reinterpret_cast<const uint8_t*>(code);  // Base of data points at code.
-    base -= sizeof(void*);  // Move backward so that code_offset != 0.
-    code_offset = sizeof(void*);
-    gc_map_offset = 0;
-  } else {
-    // TODO: 64bit support.
-    base = nullptr;  // Base of data in oat file, ie 0.
-    code_offset = PointerToLowMemUInt32(code);
-    gc_map_offset = PointerToLowMemUInt32(gc_map);
-  }
-  return OatFile::OatMethod(base, code_offset, gc_map_offset);
-}
-
 void CommonCompilerTest::MakeExecutable(mirror::ArtMethod* method) {
   CHECK(method != nullptr);
 
@@ -174,7 +49,8 @@
   if (compiled_method != nullptr) {
     const std::vector<uint8_t>* code = compiled_method->GetQuickCode();
     const void* code_ptr;
-    if (code != nullptr) {
+    bool is_portable = (code == nullptr);
+    if (!is_portable) {
       uint32_t code_size = code->size();
       CHECK_NE(0u, code_size);
       const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
@@ -210,33 +86,11 @@
     const void* method_code = CompiledMethod::CodePointer(code_ptr,
                                                           compiled_method->GetInstructionSet());
     LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code;
-    OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
-    oat_method.LinkMethod(method);
-    method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
+    class_linker_->SetEntryPointsToCompiledCode(method, method_code, is_portable);
   } else {
     // No code? You must mean to go into the interpreter.
     // Or the generic JNI...
-    if (!method->IsNative()) {
-      const void* method_code = kUsePortableCompiler ? GetPortableToInterpreterBridge()
-          : GetQuickToInterpreterBridge();
-      OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
-      oat_method.LinkMethod(method);
-      method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
-    } else {
-      const void* method_code = reinterpret_cast<void*>(art_quick_generic_jni_trampoline);
-
-      OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
-      oat_method.LinkMethod(method);
-      method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
-    }
-  }
-  // Create bridges to transition between different kinds of compiled bridge.
-  if (method->GetEntryPointFromPortableCompiledCode() == nullptr) {
-    method->SetEntryPointFromPortableCompiledCode(GetPortableToQuickBridge());
-  } else {
-    CHECK(method->GetEntryPointFromQuickCompiledCode() == nullptr);
-    method->SetEntryPointFromQuickCompiledCode(GetQuickToPortableBridge());
-    method->SetIsPortableCompiled();
+    class_linker_->SetEntryPointsToInterpreter(method);
   }
 }
 
@@ -282,19 +136,9 @@
   {
     ScopedObjectAccess soa(Thread::Current());
 
-    InstructionSet instruction_set = kRuntimeISA;
-
+    const InstructionSet instruction_set = kRuntimeISA;
     // Take the default set of instruction features from the build.
-    InstructionSetFeatures instruction_set_features =
-        ParseFeatureList(Runtime::GetDefaultInstructionSetFeatures());
-
-#if defined(__arm__)
-    InstructionSetFeatures runtime_features = GuessInstructionFeatures();
-
-    // for ARM, do a runtime check to make sure that the features we are passed from
-    // the build match the features we actually determine at runtime.
-    ASSERT_LE(instruction_set_features, runtime_features);
-#endif
+    instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
 
     runtime_->SetInstructionSet(instruction_set);
     for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
@@ -313,7 +157,7 @@
                                               verification_results_.get(),
                                               method_inliner_map_.get(),
                                               compiler_kind, instruction_set,
-                                              instruction_set_features,
+                                              instruction_set_features_.get(),
                                               true, new std::set<std::string>,
                                               2, true, true, timer_.get()));
   }
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index df06b71..20b750c 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -81,6 +81,8 @@
   std::unique_ptr<CompilerCallbacks> callbacks_;
   std::unique_ptr<CompilerDriver> compiler_driver_;
   std::unique_ptr<CumulativeLogger> timer_;
+  std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
+
 
  private:
   std::unique_ptr<MemMap> image_reservation_;
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index 37e3a7a..34585c1 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -68,7 +68,7 @@
   InstructionSet instruction_set;
   bool target64;
 
-  InstructionSetFeatures GetInstructionSetFeatures() {
+  const InstructionSetFeatures* GetInstructionSetFeatures() {
     return compiler_driver->GetInstructionSetFeatures();
   }
 
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index e833c9a..09acf4c 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -969,9 +969,9 @@
     size = k32;
   }
   LIR* load;
-  if (UNLIKELY(is_volatile == kVolatile &&
-               (size == k64 || size == kDouble) &&
-               !cu_->compiler_driver->GetInstructionSetFeatures().HasLpae())) {
+  if (is_volatile == kVolatile && (size == k64 || size == kDouble) &&
+      !cu_->compiler_driver->GetInstructionSetFeatures()->
+          AsArmInstructionSetFeatures()->HasLpae()) {
     // Only 64-bit load needs special handling.
     // If the cpu supports LPAE, aligned LDRD is atomic - fall through to LoadBaseDisp().
     DCHECK(!r_dest.IsFloat());  // See RegClassForFieldLoadSave().
@@ -1093,9 +1093,9 @@
   }
 
   LIR* store;
-  if (UNLIKELY(is_volatile == kVolatile &&
-               (size == k64 || size == kDouble) &&
-               !cu_->compiler_driver->GetInstructionSetFeatures().HasLpae())) {
+  if (is_volatile == kVolatile && (size == k64 || size == kDouble) &&
+      !cu_->compiler_driver->GetInstructionSetFeatures()->
+          AsArmInstructionSetFeatures()->HasLpae()) {
     // Only 64-bit store needs special handling.
     // If the cpu supports LPAE, aligned STRD is atomic - fall through to StoreBaseDisp().
     // Use STREXD for the atomic store. (Expect displacement > 0, don't optimize for == 0.)
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 12ca065..a33d15f 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -1606,7 +1606,8 @@
       rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
       done = true;
     } else if (cu_->instruction_set == kThumb2) {
-      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
+      if (cu_->GetInstructionSetFeatures()->AsArmInstructionSetFeatures()->
+              HasDivideInstruction()) {
         // Use ARM SDIV instruction for division.  For remainder we also need to
         // calculate using a MUL and subtract.
         rl_src1 = LoadValue(rl_src1, kCoreReg);
@@ -1875,7 +1876,8 @@
         rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
         done = true;
       } else if (cu_->instruction_set == kThumb2) {
-        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
+        if (cu_->GetInstructionSetFeatures()->AsArmInstructionSetFeatures()->
+                HasDivideInstruction()) {
           // Use ARM SDIV instruction for division.  For remainder we also need to
           // calculate using a MUL and subtract.
           rl_src = LoadValue(rl_src, kCoreReg);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index fb648fc..aac33d2 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -329,7 +329,7 @@
                                DexFileToMethodInlinerMap* method_inliner_map,
                                Compiler::Kind compiler_kind,
                                InstructionSet instruction_set,
-                               InstructionSetFeatures instruction_set_features,
+                               const InstructionSetFeatures* instruction_set_features,
                                bool image, std::set<std::string>* image_classes, size_t thread_count,
                                bool dump_stats, bool dump_passes, CumulativeLogger* timer,
                                std::string profile_file)
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index c445683..0425d27 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -88,7 +88,7 @@
                           DexFileToMethodInlinerMap* method_inliner_map,
                           Compiler::Kind compiler_kind,
                           InstructionSet instruction_set,
-                          InstructionSetFeatures instruction_set_features,
+                          const InstructionSetFeatures* instruction_set_features,
                           bool image, std::set<std::string>* image_classes,
                           size_t thread_count, bool dump_stats, bool dump_passes,
                           CumulativeLogger* timer, std::string profile_file = "");
@@ -115,7 +115,7 @@
     return instruction_set_;
   }
 
-  InstructionSetFeatures GetInstructionSetFeatures() const {
+  const InstructionSetFeatures* GetInstructionSetFeatures() const {
     return instruction_set_features_;
   }
 
@@ -475,7 +475,7 @@
   std::unique_ptr<Compiler> compiler_;
 
   const InstructionSet instruction_set_;
-  const InstructionSetFeatures instruction_set_features_;
+  const InstructionSetFeatures* const instruction_set_features_;
 
   // All class references that require
   mutable ReaderWriterMutex freezing_constructor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index fd7d350..0fea2a7 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -73,10 +73,10 @@
     }
     ASSERT_TRUE(method != nullptr) << method_name << " " << method_sig;
     if (check_generic_jni_) {
-      method->SetEntryPointFromQuickCompiledCode(class_linker_->GetQuickGenericJniTrampoline());
+      method->SetEntryPointFromQuickCompiledCode(class_linker_->GetRuntimeQuickGenericJniStub());
     } else {
-      if (method->GetEntryPointFromQuickCompiledCode() == nullptr ||
-          method->GetEntryPointFromQuickCompiledCode() == class_linker_->GetQuickGenericJniTrampoline()) {
+      const void* code = method->GetEntryPointFromQuickCompiledCode();
+      if (code == nullptr || class_linker_->IsQuickGenericJniStub(code)) {
         CompileMethod(method);
         ASSERT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr)
             << method_name << " " << method_sig;
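
The test now asks the ClassLinker whether a code pointer is the generic JNI stub rather
than comparing it to a single trampoline address. The sketch below illustrates why a
predicate is the more robust shape when the boot image replicates trampolines;
FakeClassLinker and both stub addresses are hypothetical stand-ins, not ART's
implementation.

```cpp
// Illustration of predicate-style entrypoint checks. The two addresses stand
// in for the runtime's own stub and a copy replicated in the boot image; a
// plain pointer comparison against one of them would miss the other.
#include <cstdio>

class FakeClassLinker {
 public:
  FakeClassLinker(const void* runtime_stub, const void* image_stub)
      : runtime_stub_(runtime_stub), image_stub_(image_stub) {}
  // Mirrors the shape of ClassLinker::IsQuickGenericJniStub(code).
  bool IsQuickGenericJniStub(const void* code) const {
    return code == runtime_stub_ || code == image_stub_;
  }

 private:
  const void* const runtime_stub_;  // Stub linked into the runtime itself.
  const void* const image_stub_;    // Replicated trampoline in the boot image.
};

int main() {
  int runtime_stub = 0, image_stub = 0;  // Dummy storage; only the addresses matter.
  FakeClassLinker linker(&runtime_stub, &image_stub);
  const void* code = &image_stub;  // Method whose entrypoint is the image copy.
  std::printf("is generic JNI stub: %s\n", linker.IsQuickGenericJniStub(code) ? "yes" : "no");
  return 0;
}
```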
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 3fcc369..a9d30b6 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -95,7 +95,10 @@
       : Compiler::kQuick;
   InstructionSet insn_set = kIsTargetBuild ? kThumb2 : kX86;
 
-  InstructionSetFeatures insn_features;
+  std::string error_msg;
+  std::unique_ptr<const InstructionSetFeatures> insn_features(
+      InstructionSetFeatures::FromFeatureString(insn_set, "default", &error_msg));
+  ASSERT_TRUE(insn_features.get() != nullptr) << error_msg;
   compiler_options_.reset(new CompilerOptions);
   verification_results_.reset(new VerificationResults(compiler_options_.get()));
   method_inliner_map_.reset(new DexFileToMethodInlinerMap);
@@ -106,7 +109,7 @@
                                             verification_results_.get(),
                                             method_inliner_map_.get(),
                                             compiler_kind, insn_set,
-                                            insn_features, false, NULL, 2, true, true,
+                                            insn_features.get(), false, NULL, 2, true, true,
                                             timer_.get()));
   jobject class_loader = NULL;
   if (kCompile) {
@@ -135,7 +138,6 @@
   if (kCompile) {  // OatWriter strips the code, regenerate to compare
     compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
   }
-  std::string error_msg;
   std::unique_ptr<OatFile> oat_file(OatFile::Open(tmp.GetFilename(), tmp.GetFilename(), NULL, false,
                                             &error_msg));
   ASSERT_TRUE(oat_file.get() != nullptr) << error_msg;
@@ -193,13 +195,16 @@
 }
 
 TEST_F(OatTest, OatHeaderIsValid) {
-    InstructionSet instruction_set = kX86;
-    InstructionSetFeatures instruction_set_features;
+    InstructionSet insn_set = kX86;
+    std::string error_msg;
+    std::unique_ptr<const InstructionSetFeatures> insn_features(
+        InstructionSetFeatures::FromFeatureString(insn_set, "default", &error_msg));
+    ASSERT_TRUE(insn_features.get() != nullptr) << error_msg;
     std::vector<const DexFile*> dex_files;
     uint32_t image_file_location_oat_checksum = 0;
     uint32_t image_file_location_oat_begin = 0;
-    std::unique_ptr<OatHeader> oat_header(OatHeader::Create(instruction_set,
-                                                            instruction_set_features,
+    std::unique_ptr<OatHeader> oat_header(OatHeader::Create(insn_set,
+                                                            insn_features.get(),
                                                             &dex_files,
                                                             image_file_location_oat_checksum,
                                                             image_file_location_oat_begin,
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index 41b3ceb..a98d714 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -17,7 +17,6 @@
 #ifndef ART_COMPILER_OPTIMIZING_GVN_H_
 #define ART_COMPILER_OPTIMIZING_GVN_H_
 
-#include <gtest/gtest.h>
 #include "nodes.h"
 
 namespace art {
@@ -221,7 +220,7 @@
   // Mark visisted blocks. Only used for debugging.
   GrowableArray<bool> visited_;
 
-  FRIEND_TEST(GVNTest, LoopSideEffects);
+  ART_FRIEND_TEST(GVNTest, LoopSideEffects);
   DISALLOW_COPY_AND_ASSIGN(GlobalValueNumberer);
 };
 
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index d4c233a..0c3a9b3 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -21,8 +21,6 @@
 #include "primitive.h"
 #include "utils/growable_array.h"
 
-#include "gtest/gtest.h"
-
 namespace art {
 
 class CodeGenerator;
@@ -189,7 +187,7 @@
   // The maximum live registers at safepoints.
   size_t maximum_number_of_live_registers_;
 
-  FRIEND_TEST(RegisterAllocatorTest, FreeUntil);
+  ART_FRIEND_TEST(RegisterAllocatorTest, FreeUntil);
 
   DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
 };
diff --git a/compiler/utils/arena_allocator.cc b/compiler/utils/arena_allocator.cc
index 516ac2b..0c93f0a 100644
--- a/compiler/utils/arena_allocator.cc
+++ b/compiler/utils/arena_allocator.cc
@@ -15,6 +15,7 @@
  */
 
 #include <algorithm>
+#include <iomanip>
 #include <numeric>
 
 #include "arena_allocator.h"
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 2a7d998..664e0b1 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -251,13 +251,23 @@
                      const CompilerOptions& compiler_options,
                      Compiler::Kind compiler_kind,
                      InstructionSet instruction_set,
-                     InstructionSetFeatures instruction_set_features,
+                     const InstructionSetFeatures* instruction_set_features,
                      VerificationResults* verification_results,
                      DexFileToMethodInlinerMap* method_inliner_map,
                      size_t thread_count)
       SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_) {
     CHECK(verification_results != nullptr);
     CHECK(method_inliner_map != nullptr);
+    if (instruction_set == kRuntimeISA) {
+      std::unique_ptr<const InstructionSetFeatures> runtime_features(
+          InstructionSetFeatures::FromCppDefines());
+      if (!instruction_set_features->Equals(runtime_features.get())) {
+        LOG(WARNING) << "Mismatch between dex2oat instruction set features ("
+            << *instruction_set_features << ") and those of dex2oat executable ("
+            << *runtime_features <<") for the command line:\n"
+            << CommandLine();
+      }
+    }
     std::unique_ptr<Dex2Oat> dex2oat(new Dex2Oat(&compiler_options,
                                                  compiler_kind,
                                                  instruction_set,
@@ -482,7 +492,7 @@
   explicit Dex2Oat(const CompilerOptions* compiler_options,
                    Compiler::Kind compiler_kind,
                    InstructionSet instruction_set,
-                   InstructionSetFeatures instruction_set_features,
+                   const InstructionSetFeatures* instruction_set_features,
                    VerificationResults* verification_results,
                    DexFileToMethodInlinerMap* method_inliner_map,
                    size_t thread_count)
@@ -527,7 +537,7 @@
   static void OpenClassPathFiles(const std::string& class_path,
                                  std::vector<const DexFile*>& dex_files) {
     std::vector<std::string> parsed;
-    Split(class_path, ':', parsed);
+    Split(class_path, ':', &parsed);
     // Take Locks::mutator_lock_ so that lock ordering on the ClassLinker::dex_lock_ is maintained.
     ScopedObjectAccess soa(Thread::Current());
     for (size_t i = 0; i < parsed.size(); ++i) {
@@ -556,7 +566,7 @@
   const Compiler::Kind compiler_kind_;
 
   const InstructionSet instruction_set_;
-  const InstructionSetFeatures instruction_set_features_;
+  const InstructionSetFeatures* const instruction_set_features_;
 
   VerificationResults* const verification_results_;
   DexFileToMethodInlinerMap* const method_inliner_map_;
@@ -728,38 +738,6 @@
 const unsigned int WatchDog::kWatchDogWarningSeconds;
 const unsigned int WatchDog::kWatchDogTimeoutSeconds;
 
-// Given a set of instruction features from the build, parse it.  The
-// input 'str' is a comma separated list of feature names.  Parse it and
-// return the InstructionSetFeatures object.
-static InstructionSetFeatures ParseFeatureList(std::string str) {
-  InstructionSetFeatures result;
-  typedef std::vector<std::string> FeatureList;
-  FeatureList features;
-  Split(str, ',', features);
-  for (FeatureList::iterator i = features.begin(); i != features.end(); i++) {
-    std::string feature = Trim(*i);
-    if (feature == "default") {
-      // Nothing to do.
-    } else if (feature == "div") {
-      // Supports divide instruction.
-       result.SetHasDivideInstruction(true);
-    } else if (feature == "nodiv") {
-      // Turn off support for divide instruction.
-      result.SetHasDivideInstruction(false);
-    } else if (feature == "lpae") {
-      // Supports Large Physical Address Extension.
-      result.SetHasLpae(true);
-    } else if (feature == "nolpae") {
-      // Turn off support for Large Physical Address Extension.
-      result.SetHasLpae(false);
-    } else {
-      Usage("Unknown instruction set feature: '%s'", feature.c_str());
-    }
-  }
-  // others...
-  return result;
-}
-
 void ParseStringAfterChar(const std::string& s, char c, std::string* parsed_value) {
   std::string::size_type colon = s.find(c);
   if (colon == std::string::npos) {
@@ -854,11 +832,12 @@
   int tiny_method_threshold = CompilerOptions::kDefaultTinyMethodThreshold;
   int num_dex_methods_threshold = CompilerOptions::kDefaultNumDexMethodsThreshold;
 
-  // Take the default set of instruction features from the build.
-  InstructionSetFeatures instruction_set_features =
-      ParseFeatureList(Runtime::GetDefaultInstructionSetFeatures());
-
+  // Initialize ISA and ISA features to default values.
   InstructionSet instruction_set = kRuntimeISA;
+  std::string error_msg;
+  std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+      InstructionSetFeatures::FromFeatureString(instruction_set, "default", &error_msg));
+  CHECK(instruction_set_features.get() != nullptr) << error_msg;
 
   // Profile file to use
   std::string profile_file;
@@ -961,9 +940,20 @@
       } else if (instruction_set_str == "x86_64") {
         instruction_set = kX86_64;
       }
+    } else if (option.starts_with("--instruction-set-variant=")) {
+      StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
+      instruction_set_features.reset(
+          InstructionSetFeatures::FromVariant(instruction_set, str.as_string(), &error_msg));
+      if (instruction_set_features.get() == nullptr) {
+        Usage("%s", error_msg.c_str());
+      }
     } else if (option.starts_with("--instruction-set-features=")) {
       StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
-      instruction_set_features = ParseFeatureList(str.as_string());
+      instruction_set_features.reset(
+          InstructionSetFeatures::FromFeatureString(instruction_set, str.as_string(), &error_msg));
+      if (instruction_set_features.get() == nullptr) {
+        Usage("%s", error_msg.c_str());
+      }
     } else if (option.starts_with("--compiler-backend=")) {
       StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data();
       if (backend_str == "Quick") {
@@ -1283,8 +1273,7 @@
   QuickCompilerCallbacks callbacks(verification_results.get(), &method_inliner_map);
   runtime_options.push_back(std::make_pair("compilercallbacks", &callbacks));
   runtime_options.push_back(
-      std::make_pair("imageinstructionset",
-                     reinterpret_cast<const void*>(GetInstructionSetString(instruction_set))));
+      std::make_pair("imageinstructionset", GetInstructionSetString(instruction_set)));
 
   Dex2Oat* p_dex2oat;
   if (!Dex2Oat::Create(&p_dex2oat,
@@ -1292,7 +1281,7 @@
                        *compiler_options,
                        compiler_kind,
                        instruction_set,
-                       instruction_set_features,
+                       instruction_set_features.get(),
                        verification_results.get(),
                        &method_inliner_map,
                        thread_count)) {
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index d67c169..eb3b024 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -63,6 +63,7 @@
   	$(call set-target-local-cflags-vars,$(2))
   else # host
     LOCAL_CLANG := $(ART_HOST_CLANG)
+    LOCAL_LDLIBS := $(ART_HOST_LDLIBS)
     LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
     ifeq ($$(art_ndebug_or_debug),debug)
       LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 1f2c0aa..2e64198 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -396,8 +396,13 @@
     os << "INSTRUCTION SET:\n";
     os << oat_header.GetInstructionSet() << "\n\n";
 
-    os << "INSTRUCTION SET FEATURES:\n";
-    os << oat_header.GetInstructionSetFeatures().GetFeatureString() << "\n\n";
+    {
+      std::unique_ptr<const InstructionSetFeatures> features(
+          InstructionSetFeatures::FromBitmap(oat_header.GetInstructionSet(),
+                                             oat_header.GetInstructionSetFeaturesBitmap()));
+      os << "INSTRUCTION SET FEATURES:\n";
+      os << features->GetFeatureString() << "\n\n";
+    }
 
     os << "DEX FILE COUNT:\n";
     os << oat_header.GetDexFileCount() << "\n\n";
@@ -1493,7 +1498,7 @@
   const void* GetQuickOatCodeBegin(mirror::ArtMethod* m)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     const void* quick_code = m->GetEntryPointFromQuickCompiledCode();
-    if (quick_code == Runtime::Current()->GetClassLinker()->GetQuickResolutionTrampoline()) {
+    if (Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(quick_code)) {
       quick_code = oat_dumper_->GetQuickOatCode(m);
     }
     if (oat_dumper_->GetInstructionSet() == kThumb2) {
diff --git a/runtime/Android.mk b/runtime/Android.mk
index e954476..dbafb83 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -218,11 +218,11 @@
 LIBART_TARGET_SRC_FILES_arm := \
   arch/arm/context_arm.cc.arm \
   arch/arm/entrypoints_init_arm.cc \
+  arch/arm/instruction_set_features_arm.S \
   arch/arm/jni_entrypoints_arm.S \
   arch/arm/memcmp16_arm.S \
   arch/arm/portable_entrypoints_arm.S \
   arch/arm/quick_entrypoints_arm.S \
-  arch/arm/arm_sdiv.S \
   arch/arm/thread_arm.cc \
   arch/arm/fault_handler_arm.cc
 
@@ -317,7 +317,7 @@
   thread_state.h \
   verifier/method_verifier.h
 
-LIBART_CFLAGS :=
+LIBART_CFLAGS := -DBUILDING_LIBART=1
 ifeq ($(ART_USE_PORTABLE_COMPILER),true)
   LIBART_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
 endif
@@ -328,6 +328,29 @@
   LIBART_CFLAGS += -DUSE_JEMALLOC
 endif
 
+# Default dex2oat instruction set features.
+LIBART_HOST_DEFAULT_INSTRUCTION_SET_FEATURES := default
+LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := default
+2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := default
+ifeq ($(DEX2OAT_TARGET_ARCH),arm)
+  ifneq (,$(filter $(DEX2OAT_TARGET_CPU_VARIANT),cortex-a15 krait denver))
+    LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := lpae,div
+  else
+    ifneq (,$(filter $(DEX2OAT_TARGET_CPU_VARIANT),cortex-a7))
+      LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := div
+    endif
+  endif
+endif
+ifeq ($(2ND_DEX2OAT_TARGET_ARCH),arm)
+  ifneq (,$(filter $(DEX2OAT_TARGET_CPU_VARIANT),cortex-a15 krait denver))
+    2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := lpae,div
+  else
+    ifneq (,$(filter $(DEX2OAT_TARGET_CPU_VARIANT),cortex-a7))
+      2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := div
+    endif
+  endif
+endif
+
 # $(1): target or host
 # $(2): ndebug or debug
 define build-libart
@@ -393,6 +416,9 @@
   ifeq ($$(art_target_or_host),target)
     $$(eval $$(call set-target-local-clang-vars))
     $$(eval $$(call set-target-local-cflags-vars,$(2)))
+    LOCAL_CFLAGS_$(DEX2OAT_TARGET_ARCH) += -DART_DEFAULT_INSTRUCTION_SET_FEATURES="$(LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES)"
+    LOCAL_CFLAGS_$(2ND_DEX2OAT_TARGET_ARCH) += -DART_DEFAULT_INSTRUCTION_SET_FEATURES="$(2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES)"
+
     # TODO: Loop with ifeq, ART_TARGET_CLANG
     ifneq ($$(ART_TARGET_CLANG_$$(TARGET_ARCH)),true)
       LOCAL_SRC_FILES_$$(TARGET_ARCH) += $$(LIBART_GCC_ONLY_SRC_FILES)
@@ -401,18 +427,25 @@
       LOCAL_SRC_FILES_$$(TARGET_2ND_ARCH) += $$(LIBART_GCC_ONLY_SRC_FILES)
     endif
   else # host
-    LOCAL_CLANG := $$(ART_HOST_CLANG)
-    ifeq ($$(ART_HOST_CLANG),false)
+    ifneq ($$(ART_HOST_CLANG),true)
+      # Add files only built with GCC on the host.
       LOCAL_SRC_FILES += $$(LIBART_GCC_ONLY_SRC_FILES)
     endif
+    LOCAL_CLANG := $$(ART_HOST_CLANG)
+    LOCAL_LDLIBS := $$(ART_HOST_LDLIBS)
+    LOCAL_LDLIBS += -ldl -lpthread
+    ifeq ($$(HOST_OS),linux)
+      LOCAL_LDLIBS += -lrt
+    endif
     LOCAL_CFLAGS += $$(ART_HOST_CFLAGS)
+    LOCAL_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES="$(LIBART_HOST_DEFAULT_INSTRUCTION_SET_FEATURES)"
+
     ifeq ($$(art_ndebug_or_debug),debug)
       LOCAL_CFLAGS += $$(ART_HOST_DEBUG_CFLAGS)
-      LOCAL_LDLIBS += $$(ART_HOST_DEBUG_LDLIBS)
-      LOCAL_STATIC_LIBRARIES := libgtest_host
     else
       LOCAL_CFLAGS += $$(ART_HOST_NON_DEBUG_CFLAGS)
     endif
+    LOCAL_MULTILIB := both
   endif
 
   LOCAL_C_INCLUDES += $$(ART_C_INCLUDES)
@@ -427,11 +460,6 @@
   else # host
     LOCAL_STATIC_LIBRARIES += libcutils libziparchive-host libz libutils
     LOCAL_SHARED_LIBRARIES += libsigchain
-    LOCAL_LDLIBS += -ldl -lpthread
-    ifeq ($$(HOST_OS),linux)
-      LOCAL_LDLIBS += -lrt
-    endif
-    LOCAL_MULTILIB := both
   endif
   ifeq ($$(ART_USE_PORTABLE_COMPILER),true)
     include $$(LLVM_GEN_INTRINSICS_MK)
@@ -488,6 +516,9 @@
 LOCAL_PATH :=
 LIBART_COMMON_SRC_FILES :=
 LIBART_GCC_ONLY_SRC_FILES :=
+LIBART_HOST_DEFAULT_INSTRUCTION_SET_FEATURES :=
+LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES :=
+2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES :=
 LIBART_TARGET_LDFLAGS :=
 LIBART_HOST_LDFLAGS :=
 LIBART_TARGET_SRC_FILES :=
diff --git a/runtime/arch/arm/arm_sdiv.S b/runtime/arch/arm/arm_sdiv.S
deleted file mode 100644
index babdbf5..0000000
--- a/runtime/arch/arm/arm_sdiv.S
+++ /dev/null
@@ -1,24 +0,0 @@
-// This function is used to check for the CPU's support for the sdiv
-// instruction at runtime.  It will either return the value 1 or
-// will cause an invalid instruction trap (SIGILL signal).  The
-// caller must arrange for the signal handler to set the r0
-// register to 0 and move the pc forward by 4 bytes (to skip
-// the invalid instruction).
-
-
-#include "asm_support_arm.S"
-
-.section .text
-ENTRY_NO_HIDE CheckForARMSDIVInstruction
-  mov r1,#1
-  // depending on the architecture, the assembler will not allow an
-  // sdiv instruction, so we will have to output the bytes directly.
-
-  // sdiv r0,r1,r1 is two words: 0xfb91 0xf1f0.  We need little endian.
-  .byte 0x91,0xfb,0xf1,0xf0
-
-  // if the divide worked, r0 will have the value #1 (result of sdiv).
-  // It will have 0 otherwise (set by the signal handler)
-  // the value is just returned from this function.
-  bx lr
-  END CheckForARMSDIVInstruction
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index a3e3b21..fb6458c 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -42,18 +42,6 @@
     .fnstart
 .endm
 
-.macro ENTRY_NO_HIDE name
-    .thumb_func
-    .type \name, #function
-    .global \name
-    /* Cache alignment for function entry */
-    .balign 16
-\name:
-    .cfi_startproc
-    .fnstart
-.endm
-
-
 .macro ARM_ENTRY name
     .arm
     .type \name, #function
@@ -68,19 +56,6 @@
     .fnstart
 .endm
 
-.macro ARM_ENTRY_NO_HIDE name
-    .arm
-    .type \name, #function
-    .global \name
-    /* Cache alignment for function entry */
-    .balign 16
-\name:
-    .cfi_startproc
-     /* Ensure we get a sane starting CFA. */
-    .cfi_def_cfa sp,0
-    .fnstart
-.endm
-
 .macro END name
     .fnend
     .cfi_endproc
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 2780d1b..ff0eb4a 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -21,17 +21,11 @@
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/entrypoint_utils.h"
 #include "entrypoints/math_entrypoints.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "interpreter/interpreter.h"
 
 namespace art {
 
-// Interpreter entrypoints.
-extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
-                                                  const DexFile::CodeItem* code_item,
-                                                  ShadowFrame* shadow_frame, JValue* result);
-extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
-                                                   const DexFile::CodeItem* code_item,
-                                                   ShadowFrame* shadow_frame, JValue* result);
-
 // Portable entrypoints.
 extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
 extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
diff --git a/runtime/arch/arm/instruction_set_features_arm.S b/runtime/arch/arm/instruction_set_features_arm.S
new file mode 100644
index 0000000..c26f2cd
--- /dev/null
+++ b/runtime/arch/arm/instruction_set_features_arm.S
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_arm.S"
+
+.section .text
+// This function is used to check for the CPU's support for the sdiv
+// instruction at runtime.  It will either return the value 1 or
+// will cause an invalid instruction trap (SIGILL signal).  The
+// caller must arrange for the signal handler to set the r0
+// register to 0 and move the pc forward by 4 bytes (to skip
+// the invalid instruction).
+ENTRY artCheckForARMSDIVInstruction
+  mov r1,#1
+  // depending on the architecture, the assembler will not allow an
+  // sdiv instruction, so we will have to output the bytes directly.
+
+  // sdiv r0,r1,r1 is two words: 0xfb91 0xf1f0.  We need little endian.
+  .byte 0x91,0xfb,0xf1,0xf0
+
+  // if the divide worked, r0 will have the value #1 (result of sdiv).
+  // It will have 0 otherwise (set by the signal handler)
+  // the value is just returned from this function.
+  bx lr
+END artCheckForARMSDIVInstruction
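
This relocated assembly is still meant to be driven from C++ by installing a SIGILL
handler around the probe, as the code removed from common_compiler_test.cc earlier in
this diff did. A condensed, ARM-only sketch of that pattern, assuming it links against
the routine above:

```cpp
// Condensed ARM-only sketch of the runtime sdiv probe, following the code
// removed from common_compiler_test.cc. It assumes linking against the
// artCheckForARMSDIVInstruction routine defined in the assembly above.
#include <csignal>
#include <cstdio>
#include <sys/ucontext.h>

extern "C" bool artCheckForARMSDIVInstruction();

// SIGILL handler: report failure by zeroing r0 and skip the 4-byte sdiv.
static void bad_divide_inst(int, siginfo_t*, void* data) {
  ucontext_t* uc = reinterpret_cast<ucontext_t*>(data);
  uc->uc_mcontext.arm_r0 = 0;   // Probe result becomes "unsupported".
  uc->uc_mcontext.arm_pc += 4;  // Step over the offending instruction.
}

bool HasSdiv() {
  struct sigaction sa, osa;
  sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
  sa.sa_sigaction = bad_divide_inst;
  sigemptyset(&sa.sa_mask);
  sigaction(SIGILL, &sa, &osa);        // Install the probe handler.
  bool result = artCheckForARMSDIVInstruction();
  sigaction(SIGILL, &osa, nullptr);    // Restore the previous handler.
  return result;
}

int main() {
  std::printf("sdiv supported: %s\n", HasSdiv() ? "yes" : "no");
  return 0;
}
```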
diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S
index 3491c18..a34db6c 100644
--- a/runtime/arch/arm/portable_entrypoints_arm.S
+++ b/runtime/arch/arm/portable_entrypoints_arm.S
@@ -138,7 +138,7 @@
 END art_portable_resolution_trampoline
 
     .extern artPortableToInterpreterBridge
-ENTRY_NO_HIDE art_portable_to_interpreter_bridge
+ENTRY art_portable_to_interpreter_bridge
     @ Fake callee save ref and args frame set up, note portable doesn't use callee save frames.
     @ TODO: just save the registers that are needed in artPortableToInterpreterBridge.
     push {r1-r3, r5-r8, r10-r11, lr}  @ 10 words of callee saves
@@ -165,3 +165,5 @@
     .cfi_adjust_cfa_offset -48
     bx      lr                     @ return
 END art_portable_to_interpreter_bridge
+
+UNIMPLEMENTED art_portable_imt_conflict_trampoline
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 466e9eb..3d619be 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -919,7 +919,7 @@
     /*
      * Called to do a generic JNI down-call
      */
-ENTRY_NO_HIDE art_quick_generic_jni_trampoline
+ENTRY art_quick_generic_jni_trampoline
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
     str r0, [sp, #0]  // Store native ArtMethod* to bottom of stack.
 
@@ -1014,7 +1014,7 @@
 END art_quick_generic_jni_trampoline
 
     .extern artQuickToInterpreterBridge
-ENTRY_NO_HIDE art_quick_to_interpreter_bridge
+ENTRY art_quick_to_interpreter_bridge
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
     mov     r1, r9                 @ pass Thread::Current
     mov     r2, sp                 @ pass SP
diff --git a/runtime/arch/arm64/asm_support_arm64.S b/runtime/arch/arm64/asm_support_arm64.S
index fb49460..b3e9242 100644
--- a/runtime/arch/arm64/asm_support_arm64.S
+++ b/runtime/arch/arm64/asm_support_arm64.S
@@ -52,15 +52,6 @@
     .cfi_startproc
 .endm
 
-.macro ENTRY_NO_HIDE name
-    .type \name, #function
-    .global \name
-    /* Cache alignment for function entry */
-    .balign 16
-\name:
-    .cfi_startproc
-.endm
-
 .macro END name
     .cfi_endproc
     .size \name, .-\name
@@ -72,10 +63,4 @@
     END \name
 .endm
 
-.macro UNIMPLEMENTED_NO_HIDE name
-    ENTRY_NO_HIDE \name
-    brk 0
-    END \name
-.endm
-
 #endif  // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_S_
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 70e93b3..871e1d1 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -20,17 +20,11 @@
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/entrypoint_utils.h"
 #include "entrypoints/math_entrypoints.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "interpreter/interpreter.h"
 
 namespace art {
 
-// Interpreter entrypoints.
-extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
-                                                 const DexFile::CodeItem* code_item,
-                                                 ShadowFrame* shadow_frame, JValue* result);
-extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
-                                           const DexFile::CodeItem* code_item,
-                                           ShadowFrame* shadow_frame, JValue* result);
-
 // Portable entrypoints.
 extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
 extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
diff --git a/runtime/arch/arm64/portable_entrypoints_arm64.S b/runtime/arch/arm64/portable_entrypoints_arm64.S
index 41711b5..9e2c030 100644
--- a/runtime/arch/arm64/portable_entrypoints_arm64.S
+++ b/runtime/arch/arm64/portable_entrypoints_arm64.S
@@ -25,4 +25,6 @@
 
 UNIMPLEMENTED art_portable_resolution_trampoline
 
-UNIMPLEMENTED_NO_HIDE art_portable_to_interpreter_bridge
+UNIMPLEMENTED art_portable_to_interpreter_bridge
+
+UNIMPLEMENTED art_portable_imt_conflict_trampoline
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 52a2a88..ab9bf2d 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1438,7 +1438,7 @@
     /*
      * Called to do a generic JNI down-call
      */
-ENTRY_NO_HIDE art_quick_generic_jni_trampoline
+ENTRY art_quick_generic_jni_trampoline
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
     str x0, [sp, #0]  // Store native ArtMethod* to bottom of stack.
 
@@ -1534,7 +1534,7 @@
  * x0 = method being called/to bridge to.
  * x1..x7, d0..d7 = arguments to that method.
  */
-ENTRY_NO_HIDE art_quick_to_interpreter_bridge
+ENTRY art_quick_to_interpreter_bridge
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME   // Set up frame and save arguments.
 
     //  x0 will contain mirror::ArtMethod* method.
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 25e911d..db0f71f 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "atomic.h"
 #include "entrypoints/interpreter/interpreter_entrypoints.h"
 #include "entrypoints/jni/jni_entrypoints.h"
 #include "entrypoints/portable/portable_entrypoints.h"
@@ -21,18 +22,11 @@
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/entrypoint_utils.h"
 #include "entrypoints/math_entrypoints.h"
-#include "atomic.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "interpreter/interpreter.h"
 
 namespace art {
 
-// Interpreter entrypoints.
-extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
-                                                 const DexFile::CodeItem* code_item,
-                                                 ShadowFrame* shadow_frame, JValue* result);
-extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
-                                           const DexFile::CodeItem* code_item,
-                                           ShadowFrame* shadow_frame, JValue* result);
-
 // Portable entrypoints.
 extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
 extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S
index 7545ce0..a171a1d 100644
--- a/runtime/arch/mips/portable_entrypoints_mips.S
+++ b/runtime/arch/mips/portable_entrypoints_mips.S
@@ -131,3 +131,4 @@
 
 UNIMPLEMENTED art_portable_resolution_trampoline
 UNIMPLEMENTED art_portable_to_interpreter_bridge
+UNIMPLEMENTED art_portable_imt_conflict_trampoline
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 6b74a1b..c9b9f04 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -260,7 +260,7 @@
           "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
           "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
           "memory");  // clobber.
-#elif defined(__x86_64__) && !defined(__APPLE__)
+#elif defined(__x86_64__) && !defined(__APPLE__) && defined(__clang__)
     // Note: Uses the native convention
     // TODO: Set the thread?
     __asm__ __volatile__(
@@ -485,7 +485,7 @@
           "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
           "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
           "memory");  // clobber.
-#elif defined(__x86_64__) && !defined(__APPLE__)
+#elif defined(__x86_64__) && !defined(__APPLE__) && defined(__clang__)
     // Note: Uses the native convention
     // TODO: Set the thread?
     __asm__ __volatile__(
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index efbbfb3..78b97e5 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -135,16 +135,6 @@
     CFI_DEF_CFA(esp, 4)
 END_MACRO
 
-MACRO1(DEFINE_FUNCTION_NO_HIDE, c_name)
-    FUNCTION_TYPE(\c_name, 0)
-    .globl VAR(c_name, 0)
-    ALIGN_FUNCTION_ENTRY
-VAR(c_name, 0):
-    CFI_STARTPROC
-    // Ensure we get a sane starting CFA.
-    CFI_DEF_CFA(esp, 4)
-END_MACRO
-
 MACRO1(END_FUNCTION, c_name)
     CFI_ENDPROC
     SIZE(\c_name, 0)
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 682c502..f2b91cd 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -19,18 +19,11 @@
 #include "entrypoints/portable/portable_entrypoints.h"
 #include "entrypoints/quick/quick_alloc_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints.h"
-#include "entrypoints/entrypoint_utils.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "interpreter/interpreter.h"
 
 namespace art {
 
-// Interpreter entrypoints.
-extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
-                                                  const DexFile::CodeItem* code_item,
-                                                  ShadowFrame* shadow_frame, JValue* result);
-extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
-                                           const DexFile::CodeItem* code_item,
-                                           ShadowFrame* shadow_frame, JValue* result);
-
 // Portable entrypoints.
 extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
 extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S
index f5fe869..70c0ae2 100644
--- a/runtime/arch/x86/portable_entrypoints_x86.S
+++ b/runtime/arch/x86/portable_entrypoints_x86.S
@@ -111,7 +111,7 @@
   ret
 END_FUNCTION art_portable_resolution_trampoline
 
-DEFINE_FUNCTION_NO_HIDE art_portable_to_interpreter_bridge
+DEFINE_FUNCTION art_portable_to_interpreter_bridge
   PUSH ebp                        // Set up frame.
   movl %esp, %ebp
   CFI_DEF_CFA_REGISTER(%ebp)
@@ -127,3 +127,5 @@
   CFI_DEF_CFA(%esp, 4)
   ret
 END_FUNCTION art_portable_to_interpreter_bridge
+
+UNIMPLEMENTED art_portable_imt_conflict_trampoline
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 411d273..a158e6d 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1140,7 +1140,7 @@
     DELIVER_PENDING_EXCEPTION
 END_FUNCTION art_quick_resolution_trampoline
 
-DEFINE_FUNCTION_NO_HIDE art_quick_generic_jni_trampoline
+DEFINE_FUNCTION art_quick_generic_jni_trampoline
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
     // This also stores the native ArtMethod reference at the bottom of the stack.
 
@@ -1220,7 +1220,7 @@
     DELIVER_PENDING_EXCEPTION
 END_FUNCTION art_quick_generic_jni_trampoline
 
-DEFINE_FUNCTION_NO_HIDE art_quick_to_interpreter_bridge
+DEFINE_FUNCTION art_quick_to_interpreter_bridge
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME   // save frame
     mov %esp, %edx                // remember SP
     PUSH eax                      // alignment padding
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index 4ae61a2..5964314 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -132,16 +132,6 @@
     CFI_DEF_CFA(rsp, 8)
 END_MACRO
 
-MACRO1(DEFINE_FUNCTION_NO_HIDE, c_name)
-    FUNCTION_TYPE(\c_name, 0)
-    .globl VAR(c_name, 0)
-    ALIGN_FUNCTION_ENTRY
-VAR(c_name, 0):
-    CFI_STARTPROC
-    // Ensure we get a sane starting CFA.
-    CFI_DEF_CFA(rsp, 8)
-END_MACRO
-
 MACRO1(END_FUNCTION, c_name)
     CFI_ENDPROC
     SIZE(\c_name, 0)
@@ -172,18 +162,6 @@
     SIZE(\name, 0)
 END_MACRO
 
-MACRO1(UNIMPLEMENTED_NO_HIDE,name)
-    FUNCTION_TYPE(\name, 0)
-    .globl VAR(name, 0)
-    ALIGN_FUNCTION_ENTRY
-VAR(name, 0):
-    CFI_STARTPROC
-    int3
-    int3
-    CFI_ENDPROC
-    SIZE(\name, 0)
-END_MACRO
-
 MACRO0(UNREACHABLE)
     int3
 END_MACRO
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index c9028e1..be73594 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -19,19 +19,12 @@
 #include "entrypoints/portable/portable_entrypoints.h"
 #include "entrypoints/quick/quick_alloc_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints.h"
-#include "entrypoints/entrypoint_utils.h"
 #include "entrypoints/math_entrypoints.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "interpreter/interpreter.h"
 
 namespace art {
 
-// Interpreter entrypoints.
-extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
-                                                  const DexFile::CodeItem* code_item,
-                                                  ShadowFrame* shadow_frame, JValue* result);
-extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
-                                                   const DexFile::CodeItem* code_item,
-                                                   ShadowFrame* shadow_frame, JValue* result);
-
 // Portable entrypoints.
 extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
 extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
diff --git a/runtime/arch/x86_64/portable_entrypoints_x86_64.S b/runtime/arch/x86_64/portable_entrypoints_x86_64.S
index 7b84d17..3a54005 100644
--- a/runtime/arch/x86_64/portable_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/portable_entrypoints_x86_64.S
@@ -25,4 +25,6 @@
 
 UNIMPLEMENTED art_portable_resolution_trampoline
 
-UNIMPLEMENTED_NO_HIDE art_portable_to_interpreter_bridge
+UNIMPLEMENTED art_portable_to_interpreter_bridge
+
+UNIMPLEMENTED art_portable_imt_conflict_trampoline
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index ca9c0bf..648a99a 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1266,7 +1266,7 @@
     /*
      * Called to do a generic JNI down-call
      */
-DEFINE_FUNCTION_NO_HIDE art_quick_generic_jni_trampoline
+DEFINE_FUNCTION art_quick_generic_jni_trampoline
     // Save callee and GPR args, mixed together to agree with core spills bitmap.
     PUSH r15  // Callee save.
     PUSH r14  // Callee save.
@@ -1453,7 +1453,7 @@
      * RDI = method being called / to bridge to.
      * RSI, RDX, RCX, R8, R9 are arguments to that method.
      */
-DEFINE_FUNCTION_NO_HIDE art_quick_to_interpreter_bridge
+DEFINE_FUNCTION art_quick_to_interpreter_bridge
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME   // Set up frame and save arguments.
     movq %gs:THREAD_SELF_OFFSET, %rsi      // RSI := Thread::Current()
     movq %rsp, %rdx                        // RDX := sp
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index b2ad1d0..5af597b 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -85,7 +85,7 @@
   }
 
   std::vector<std::string> specs;
-  Split(tags, ' ', specs);
+  Split(tags, ' ', &specs);
   for (size_t i = 0; i < specs.size(); ++i) {
     // "tag-pattern:[vdiwefs]"
     std::string spec(specs[i]);
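The call sites in this and later hunks now hand Split() the destination vector by pointer. As a hedged sketch, the prototype implied by these calls (the declaration itself lives in utils.h and is not shown in this patch) would be roughly:

  void Split(const std::string& s, char separator, std::vector<std::string>* result);  // assumed, not the verified declaration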
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index f5a38bb..c80d35e 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -63,6 +63,11 @@
 #define COMPILE_ASSERT(expr, msg) \
   typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] // NOLINT
 
+// Declare a friend relationship in a class with a test. Used rather than FRIEND_TEST to avoid
+// globally importing gtest/gtest.h into the main ART header files.
+#define ART_FRIEND_TEST(test_set_name, individual_test)\
+friend class test_set_name##_##individual_test##_Test
+
 // DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions.
 // It goes in the private: declarations in a class.
 #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
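For reference, gtest names the class it generates for each test <suite>_<test>_Test, which is exactly what ART_FRIEND_TEST befriends. A small hypothetical sketch (MyType and MyTypeTest are illustrative names, not part of this change):

class MyType {
 private:
  int secret_ = 42;
  ART_FRIEND_TEST(MyTypeTest, ReadsSecret);  // expands to: friend class MyTypeTest_ReadsSecret_Test;
};

// In the test file, TEST_F(MyTypeTest, ReadsSecret) defines MyTypeTest_ReadsSecret_Test,
// which may now access MyType::secret_ without pulling gtest/gtest.h into this header.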
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index cbcd408..70b6f7e 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -83,18 +83,25 @@
 }
 #endif
 
-class ScopedAllMutexesLock {
+class ScopedAllMutexesLock FINAL {
  public:
   explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
     while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) {
       NanoSleep(100);
     }
   }
+
   ~ScopedAllMutexesLock() {
+#if !defined(__clang__)
+    // TODO: remove this workaround for the target GCC/libc++/bionic bug ("invalid failure memory model").
+    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakSequentiallyConsistent(mutex_, 0)) {
+#else
     while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) {
+#endif
       NanoSleep(100);
     }
   }
+
  private:
   const BaseMutex* const mutex_;
 };
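As a standalone illustration of the memory-order detail behind the workaround above, here is a plain std::atomic sketch. It deliberately uses std::atomic rather than ART's Atomic<> wrapper, so the types and calls are assumptions about roughly equivalent code, not the runtime's own API: a compare-exchange given only a success order lets the library derive the failure order, while the two-order overload states it explicitly; the #if branch above sidesteps the issue on the affected toolchain by using a sequentially consistent CAS instead.

#include <atomic>

std::atomic<const void*> guard{nullptr};

bool ReleaseGuard(const void* owner) {
  const void* expected = owner;
  // Success order release; failure order spelled out as relaxed rather than
  // derived from the success order.
  return guard.compare_exchange_weak(expected, nullptr,
                                     std::memory_order_release,
                                     std::memory_order_relaxed);
}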
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 5718e44..bbbb9e0 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -32,6 +32,7 @@
 #include "compiler_callbacks.h"
 #include "debugger.h"
 #include "dex_file-inl.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc_root-inl.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/accounting/heap_bitmap.h"
@@ -236,44 +237,6 @@
   }
 }
 
-const char* ClassLinker::class_roots_descriptors_[] = {
-  "Ljava/lang/Class;",
-  "Ljava/lang/Object;",
-  "[Ljava/lang/Class;",
-  "[Ljava/lang/Object;",
-  "Ljava/lang/String;",
-  "Ljava/lang/DexCache;",
-  "Ljava/lang/ref/Reference;",
-  "Ljava/lang/reflect/ArtField;",
-  "Ljava/lang/reflect/ArtMethod;",
-  "Ljava/lang/reflect/Proxy;",
-  "[Ljava/lang/String;",
-  "[Ljava/lang/reflect/ArtField;",
-  "[Ljava/lang/reflect/ArtMethod;",
-  "Ljava/lang/ClassLoader;",
-  "Ljava/lang/Throwable;",
-  "Ljava/lang/ClassNotFoundException;",
-  "Ljava/lang/StackTraceElement;",
-  "Z",
-  "B",
-  "C",
-  "D",
-  "F",
-  "I",
-  "J",
-  "S",
-  "V",
-  "[Z",
-  "[B",
-  "[C",
-  "[D",
-  "[F",
-  "[I",
-  "[J",
-  "[S",
-  "[Ljava/lang/StackTraceElement;",
-};
-
 ClassLinker::ClassLinker(InternTable* intern_table)
     // dex_lock_ is recursive as it may be used in stack dumping.
     : dex_lock_("ClassLinker dex lock", kDefaultMutexLevel),
@@ -292,16 +255,9 @@
       quick_imt_conflict_trampoline_(nullptr),
       quick_generic_jni_trampoline_(nullptr),
       quick_to_interpreter_bridge_trampoline_(nullptr) {
-  CHECK_EQ(arraysize(class_roots_descriptors_), size_t(kClassRootsMax));
   memset(find_array_class_cache_, 0, kFindArrayCacheSize * sizeof(mirror::Class*));
 }
 
-// To set a value for generic JNI. May be necessary in compiler tests.
-extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
-
 void ClassLinker::InitWithoutImage(const std::vector<const DexFile*>& boot_class_path) {
   VLOG(startup) << "ClassLinker::Init";
   CHECK(!Runtime::Current()->GetHeap()->HasImageSpace()) << "Runtime has image. We should use it.";
@@ -482,12 +438,12 @@
 
   // Set up GenericJNI entrypoint. That is mainly a hack for common_compiler_test.h so that
   // we do not need friend classes or a publicly exposed setter.
-  quick_generic_jni_trampoline_ = reinterpret_cast<void*>(art_quick_generic_jni_trampoline);
+  quick_generic_jni_trampoline_ = GetQuickGenericJniStub();
   if (!runtime->IsCompiler()) {
     // We need to set up the generic trampolines since we don't have an image.
-    quick_resolution_trampoline_ = reinterpret_cast<void*>(art_quick_resolution_trampoline);
-    quick_imt_conflict_trampoline_ = reinterpret_cast<void*>(art_quick_imt_conflict_trampoline);
-    quick_to_interpreter_bridge_trampoline_ = reinterpret_cast<void*>(art_quick_to_interpreter_bridge);
+    quick_resolution_trampoline_ = GetQuickResolutionStub();
+    quick_imt_conflict_trampoline_ = GetQuickImtConflictStub();
+    quick_to_interpreter_bridge_trampoline_ = GetQuickToInterpreterBridge();
   }
 
   // Object, String and DexCache need to be rerun through FindSystemClass to finish init
@@ -571,15 +527,15 @@
   CHECK_EQ(java_lang_reflect_ArtField.Get(), Art_field_class);
 
   mirror::Class* String_array_class =
-      FindSystemClass(self, class_roots_descriptors_[kJavaLangStringArrayClass]);
+      FindSystemClass(self, GetClassRootDescriptor(kJavaLangStringArrayClass));
   CHECK_EQ(object_array_string.Get(), String_array_class);
 
   mirror::Class* Art_method_array_class =
-      FindSystemClass(self, class_roots_descriptors_[kJavaLangReflectArtMethodArrayClass]);
+      FindSystemClass(self, GetClassRootDescriptor(kJavaLangReflectArtMethodArrayClass));
   CHECK_EQ(object_array_art_method.Get(), Art_method_array_class);
 
   mirror::Class* Art_field_array_class =
-      FindSystemClass(self, class_roots_descriptors_[kJavaLangReflectArtFieldArrayClass]);
+      FindSystemClass(self, GetClassRootDescriptor(kJavaLangReflectArtFieldArrayClass));
   CHECK_EQ(object_array_art_field.Get(), Art_field_array_class);
 
   // End of special init trickery, subsequent classes may be loaded via FindSystemClass.
@@ -1666,7 +1622,7 @@
   if (obj->IsArtMethod()) {
     mirror::ArtMethod* method = obj->AsArtMethod();
     if (!method->IsNative()) {
-      method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
+      method->SetEntryPointFromInterpreter(artInterpreterToInterpreterBridge);
       if (method != Runtime::Current()->GetResolutionMethod()) {
         method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
         method->SetEntryPointFromPortableCompiledCode(GetPortableToInterpreterBridge());
@@ -2535,7 +2491,7 @@
   if (result == nullptr) {
     if (method->IsNative()) {
       // No code and native? Use generic trampoline.
-      result = GetQuickGenericJniTrampoline();
+      result = GetQuickGenericJniStub();
     } else if (method->IsPortableCompiled()) {
       // No code? Do we expect portable code?
       result = GetQuickToPortableBridge();
@@ -2689,7 +2645,7 @@
       // Use interpreter entry point.
       // Check whether the method is native, in which case it's generic JNI.
       if (quick_code == nullptr && portable_code == nullptr && method->IsNative()) {
-        quick_code = GetQuickGenericJniTrampoline();
+        quick_code = GetQuickGenericJniStub();
         portable_code = GetPortableToQuickBridge();
       } else {
         portable_code = GetPortableToInterpreterBridge();
@@ -2715,7 +2671,8 @@
                            const OatFile::OatClass* oat_class,
                            const DexFile& dex_file, uint32_t dex_method_index,
                            uint32_t method_index) {
-  if (Runtime::Current()->IsCompiler()) {
+  Runtime* runtime = Runtime::Current();
+  if (runtime->IsCompiler()) {
     // The following code only applies to a non-compiler runtime.
     return;
   }
@@ -2734,7 +2691,7 @@
                                             method->GetEntryPointFromQuickCompiledCode(),
                                             method->GetEntryPointFromPortableCompiledCode());
   if (enter_interpreter && !method->IsNative()) {
-    method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
+    method->SetEntryPointFromInterpreter(artInterpreterToInterpreterBridge);
   } else {
     method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
   }
@@ -2750,15 +2707,15 @@
     // For static methods excluding the class initializer, install the trampoline.
     // It will be replaced by the proper entry point by ClassLinker::FixupStaticTrampolines
     // after initializing class (see ClassLinker::InitializeClass method).
-    method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionTrampoline());
-    method->SetEntryPointFromPortableCompiledCode(GetPortableResolutionTrampoline());
+    method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
+    method->SetEntryPointFromPortableCompiledCode(GetPortableResolutionStub());
   } else if (enter_interpreter) {
     if (!method->IsNative()) {
       // Set entry point from compiled code if there's no code or in interpreter only mode.
       method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
       method->SetEntryPointFromPortableCompiledCode(GetPortableToInterpreterBridge());
     } else {
-      method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniTrampoline());
+      method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
       method->SetEntryPointFromPortableCompiledCode(GetPortableToQuickBridge());
     }
   } else if (method->GetEntryPointFromPortableCompiledCode() != nullptr) {
@@ -2772,18 +2729,18 @@
 
   if (method->IsNative()) {
     // Unregistering restores the dlsym lookup stub.
-    method->UnregisterNative(Thread::Current());
+    method->UnregisterNative();
 
     if (enter_interpreter) {
-      // We have a native method here without code. Then it should have either the GenericJni
-      // trampoline as entrypoint (non-static), or the Resolution trampoline (static).
-      DCHECK(method->GetEntryPointFromQuickCompiledCode() == GetQuickResolutionTrampoline()
-          || method->GetEntryPointFromQuickCompiledCode() == GetQuickGenericJniTrampoline());
+      // We have a native method here without code. Then it should have either the generic JNI
+      // trampoline as entrypoint (non-static), or the resolution trampoline (static).
+      // TODO: this doesn't handle all the cases where trampolines may be installed.
+      const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
+      DCHECK(IsQuickGenericJniStub(entry_point) || IsQuickResolutionStub(entry_point));
     }
   }
 
   // Allow instrumentation its chance to hijack code.
-  Runtime* runtime = Runtime::Current();
   runtime->GetInstrumentation()->UpdateMethodsCode(method.Get(),
                                                    method->GetEntryPointFromQuickCompiledCode(),
                                                    method->GetEntryPointFromPortableCompiledCode(),
@@ -3224,13 +3181,13 @@
       new_class.Assign(GetClassRoot(kClassArrayClass));
     } else if (strcmp(descriptor, "[Ljava/lang/Object;") == 0) {
       new_class.Assign(GetClassRoot(kObjectArrayClass));
-    } else if (strcmp(descriptor, class_roots_descriptors_[kJavaLangStringArrayClass]) == 0) {
+    } else if (strcmp(descriptor, GetClassRootDescriptor(kJavaLangStringArrayClass)) == 0) {
       new_class.Assign(GetClassRoot(kJavaLangStringArrayClass));
     } else if (strcmp(descriptor,
-                      class_roots_descriptors_[kJavaLangReflectArtMethodArrayClass]) == 0) {
+                      GetClassRootDescriptor(kJavaLangReflectArtMethodArrayClass)) == 0) {
       new_class.Assign(GetClassRoot(kJavaLangReflectArtMethodArrayClass));
     } else if (strcmp(descriptor,
-                      class_roots_descriptors_[kJavaLangReflectArtFieldArrayClass]) == 0) {
+                      GetClassRootDescriptor(kJavaLangReflectArtFieldArrayClass)) == 0) {
       new_class.Assign(GetClassRoot(kJavaLangReflectArtFieldArrayClass));
     } else if (strcmp(descriptor, "[C") == 0) {
       new_class.Assign(GetClassRoot(kCharArrayClass));
@@ -5546,6 +5503,84 @@
   }
 }
 
+static OatFile::OatMethod CreateOatMethod(const void* code, const uint8_t* gc_map,
+                                          bool is_portable) {
+  CHECK_EQ(kUsePortableCompiler, is_portable);
+  CHECK(code != nullptr);
+  const uint8_t* base;
+  uint32_t code_offset, gc_map_offset;
+  if (gc_map == nullptr) {
+    base = reinterpret_cast<const uint8_t*>(code);  // Base of data points at code.
+    base -= sizeof(void*);  // Move backward so that code_offset != 0.
+    code_offset = sizeof(void*);
+    gc_map_offset = 0;
+  } else {
+    // TODO: 64bit support.
+    base = nullptr;  // Base of data in oat file, i.e. 0.
+    code_offset = PointerToLowMemUInt32(code);
+    gc_map_offset = PointerToLowMemUInt32(gc_map);
+  }
+  return OatFile::OatMethod(base, code_offset, gc_map_offset);
+}
+
+bool ClassLinker::IsPortableResolutionStub(const void* entry_point) const {
+  return (entry_point == GetPortableResolutionStub()) ||
+      (portable_resolution_trampoline_ == entry_point);
+}
+
+bool ClassLinker::IsQuickResolutionStub(const void* entry_point) const {
+  return (entry_point == GetQuickResolutionStub()) ||
+      (quick_resolution_trampoline_ == entry_point);
+}
+
+bool ClassLinker::IsPortableToInterpreterBridge(const void* entry_point) const {
+  return (entry_point == GetPortableToInterpreterBridge());
+  // TODO: portable_to_interpreter_bridge_trampoline_ == entry_point;
+}
+
+bool ClassLinker::IsQuickToInterpreterBridge(const void* entry_point) const {
+  return (entry_point == GetQuickToInterpreterBridge()) ||
+      (quick_to_interpreter_bridge_trampoline_ == entry_point);
+}
+
+bool ClassLinker::IsQuickGenericJniStub(const void* entry_point) const {
+  return (entry_point == GetQuickGenericJniStub()) ||
+      (quick_generic_jni_trampoline_ == entry_point);
+}
+
+const void* ClassLinker::GetRuntimeQuickGenericJniStub() const {
+  return GetQuickGenericJniStub();
+}
+
+void ClassLinker::SetEntryPointsToCompiledCode(mirror::ArtMethod* method, const void* method_code,
+                                               bool is_portable) const {
+  OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr, is_portable);
+  oat_method.LinkMethod(method);
+  method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
+  // Create bridges to transition between the different kinds of compiled code.
+  if (method->GetEntryPointFromPortableCompiledCode() == nullptr) {
+    method->SetEntryPointFromPortableCompiledCode(GetPortableToQuickBridge());
+  } else {
+    CHECK(method->GetEntryPointFromQuickCompiledCode() == nullptr);
+    method->SetEntryPointFromQuickCompiledCode(GetQuickToPortableBridge());
+    method->SetIsPortableCompiled();
+  }
+}
+
+void ClassLinker::SetEntryPointsToInterpreter(mirror::ArtMethod* method) const {
+  if (!method->IsNative()) {
+    method->SetEntryPointFromInterpreter(artInterpreterToInterpreterBridge);
+    method->SetEntryPointFromPortableCompiledCode(GetPortableToInterpreterBridge());
+    method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+  } else {
+    const void* quick_method_code = GetQuickGenericJniStub();
+    OatFile::OatMethod oat_method = CreateOatMethod(quick_method_code, nullptr, false);
+    oat_method.LinkMethod(method);
+    method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
+    method->SetEntryPointFromPortableCompiledCode(GetPortableToQuickBridge());
+  }
+}
+
 void ClassLinker::DumpForSigQuit(std::ostream& os) {
   Thread* self = Thread::Current();
   if (dex_cache_image_class_lookup_required_) {
@@ -5584,4 +5619,50 @@
   class_roots->Set<false>(class_root, klass);
 }
 
+const char* ClassLinker::GetClassRootDescriptor(ClassRoot class_root) {
+  static const char* class_roots_descriptors[] = {
+    "Ljava/lang/Class;",
+    "Ljava/lang/Object;",
+    "[Ljava/lang/Class;",
+    "[Ljava/lang/Object;",
+    "Ljava/lang/String;",
+    "Ljava/lang/DexCache;",
+    "Ljava/lang/ref/Reference;",
+    "Ljava/lang/reflect/ArtField;",
+    "Ljava/lang/reflect/ArtMethod;",
+    "Ljava/lang/reflect/Proxy;",
+    "[Ljava/lang/String;",
+    "[Ljava/lang/reflect/ArtField;",
+    "[Ljava/lang/reflect/ArtMethod;",
+    "Ljava/lang/ClassLoader;",
+    "Ljava/lang/Throwable;",
+    "Ljava/lang/ClassNotFoundException;",
+    "Ljava/lang/StackTraceElement;",
+    "Z",
+    "B",
+    "C",
+    "D",
+    "F",
+    "I",
+    "J",
+    "S",
+    "V",
+    "[Z",
+    "[B",
+    "[C",
+    "[D",
+    "[F",
+    "[I",
+    "[J",
+    "[S",
+    "[Ljava/lang/StackTraceElement;",
+  };
+  COMPILE_ASSERT(arraysize(class_roots_descriptors) == size_t(kClassRootsMax),
+                 mismatch_between_class_descriptors_and_class_root_enum);
+
+  const char* descriptor = class_roots_descriptors[class_root];
+  CHECK(descriptor != nullptr);
+  return descriptor;
+}
+
 }  // namespace art
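A hedged call-site sketch of the new predicate style (MaybeSkipStub below is a hypothetical helper, not part of this change): because a boot image can carry its own copy of a trampoline in addition to the one linked into libart, callers ask the ClassLinker whether some code is a given stub instead of comparing against a single getter.

static bool MaybeSkipStub(ClassLinker* class_linker, mirror::ArtMethod* method) {
  const void* code = method->GetEntryPointFromQuickCompiledCode();
  // True for either the libart copy or the image copy of these stubs.
  return class_linker->IsQuickResolutionStub(code) ||
         class_linker->IsQuickGenericJniStub(code);
}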
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 373fa89..1847926 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -27,7 +27,6 @@
 #include "base/mutex.h"
 #include "dex_file.h"
 #include "gc_root.h"
-#include "gtest/gtest.h"
 #include "jni.h"
 #include "oat_file.h"
 #include "object_callbacks.h"
@@ -60,6 +59,46 @@
 
 class ClassLinker {
  public:
+  // Well known mirror::Class roots accessed via GetClassRoot.
+  enum ClassRoot {
+    kJavaLangClass,
+    kJavaLangObject,
+    kClassArrayClass,
+    kObjectArrayClass,
+    kJavaLangString,
+    kJavaLangDexCache,
+    kJavaLangRefReference,
+    kJavaLangReflectArtField,
+    kJavaLangReflectArtMethod,
+    kJavaLangReflectProxy,
+    kJavaLangStringArrayClass,
+    kJavaLangReflectArtFieldArrayClass,
+    kJavaLangReflectArtMethodArrayClass,
+    kJavaLangClassLoader,
+    kJavaLangThrowable,
+    kJavaLangClassNotFoundException,
+    kJavaLangStackTraceElement,
+    kPrimitiveBoolean,
+    kPrimitiveByte,
+    kPrimitiveChar,
+    kPrimitiveDouble,
+    kPrimitiveFloat,
+    kPrimitiveInt,
+    kPrimitiveLong,
+    kPrimitiveShort,
+    kPrimitiveVoid,
+    kBooleanArrayClass,
+    kByteArrayClass,
+    kCharArrayClass,
+    kDoubleArrayClass,
+    kFloatArrayClass,
+    kIntArrayClass,
+    kLongArrayClass,
+    kShortArrayClass,
+    kJavaLangStackTraceElementArrayClass,
+    kClassRootsMax,
+  };
+
   explicit ClassLinker(InternTable* intern_table);
   ~ClassLinker();
 
@@ -371,34 +410,38 @@
   pid_t GetClassesLockOwner();  // For SignalCatcher.
   pid_t GetDexLockOwner();  // For SignalCatcher.
 
-  const void* GetPortableResolutionTrampoline() const {
-    return portable_resolution_trampoline_;
-  }
+  mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  const void* GetQuickGenericJniTrampoline() const {
-    return quick_generic_jni_trampoline_;
-  }
+  static const char* GetClassRootDescriptor(ClassRoot class_root);
 
-  const void* GetQuickResolutionTrampoline() const {
-    return quick_resolution_trampoline_;
-  }
+  // Is the given entry point portable code to run the resolution stub?
+  bool IsPortableResolutionStub(const void* entry_point) const;
 
-  const void* GetPortableImtConflictTrampoline() const {
-    return portable_imt_conflict_trampoline_;
-  }
+  // Is the given entry point quick code to run the resolution stub?
+  bool IsQuickResolutionStub(const void* entry_point) const;
 
-  const void* GetQuickImtConflictTrampoline() const {
-    return quick_imt_conflict_trampoline_;
-  }
+  // Is the given entry point portable code to bridge into the interpreter?
+  bool IsPortableToInterpreterBridge(const void* entry_point) const;
 
-  const void* GetQuickToInterpreterBridgeTrampoline() const {
-    return quick_to_interpreter_bridge_trampoline_;
-  }
+  // Is the given entry point quick code to bridge into the interpreter?
+  bool IsQuickToInterpreterBridge(const void* entry_point) const;
+
+  // Is the given entry point quick code to run the generic JNI stub?
+  bool IsQuickGenericJniStub(const void* entry_point) const;
 
   InternTable* GetInternTable() const {
     return intern_table_;
   }
 
+  // Set the entrypoints up for the method to the given code.
+  void SetEntryPointsToCompiledCode(mirror::ArtMethod* method, const void* method_code,
+                                    bool is_portable) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Set the entrypoints up for the method to enter the interpreter.
+  void SetEntryPointsToInterpreter(mirror::ArtMethod* method) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Attempts to insert a class into a class table.  Returns NULL if
   // the class was inserted, otherwise returns an existing class with
   // the same descriptor and ClassLoader.
@@ -668,6 +711,12 @@
   void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  void SetClassRoot(ClassRoot class_root, mirror::Class* klass)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Return the quick generic JNI stub for testing.
+  const void* GetRuntimeQuickGenericJniStub() const;
+
   std::vector<const DexFile*> boot_class_path_;
 
   mutable ReaderWriterMutex dex_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -691,61 +740,9 @@
   // the classes into the class_table_ to avoid dex cache based searches.
   Atomic<uint32_t> failed_dex_cache_class_lookups_;
 
-  // indexes into class_roots_.
-  // needs to be kept in sync with class_roots_descriptors_.
-  enum ClassRoot {
-    kJavaLangClass,
-    kJavaLangObject,
-    kClassArrayClass,
-    kObjectArrayClass,
-    kJavaLangString,
-    kJavaLangDexCache,
-    kJavaLangRefReference,
-    kJavaLangReflectArtField,
-    kJavaLangReflectArtMethod,
-    kJavaLangReflectProxy,
-    kJavaLangStringArrayClass,
-    kJavaLangReflectArtFieldArrayClass,
-    kJavaLangReflectArtMethodArrayClass,
-    kJavaLangClassLoader,
-    kJavaLangThrowable,
-    kJavaLangClassNotFoundException,
-    kJavaLangStackTraceElement,
-    kPrimitiveBoolean,
-    kPrimitiveByte,
-    kPrimitiveChar,
-    kPrimitiveDouble,
-    kPrimitiveFloat,
-    kPrimitiveInt,
-    kPrimitiveLong,
-    kPrimitiveShort,
-    kPrimitiveVoid,
-    kBooleanArrayClass,
-    kByteArrayClass,
-    kCharArrayClass,
-    kDoubleArrayClass,
-    kFloatArrayClass,
-    kIntArrayClass,
-    kLongArrayClass,
-    kShortArrayClass,
-    kJavaLangStackTraceElementArrayClass,
-    kClassRootsMax,
-  };
+  // Well known mirror::Class roots.
   GcRoot<mirror::ObjectArray<mirror::Class>> class_roots_;
 
-  mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  void SetClassRoot(ClassRoot class_root, mirror::Class* klass)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  static const char* class_roots_descriptors_[];
-
-  const char* GetClassRootDescriptor(ClassRoot class_root) {
-    const char* descriptor = class_roots_descriptors_[class_root];
-    CHECK(descriptor != NULL);
-    return descriptor;
-  }
-
   // The interface table used by all arrays.
   GcRoot<mirror::IfTable> array_iftable_;
 
@@ -773,12 +770,11 @@
   friend class ImageWriter;  // for GetClassRoots
   friend class ImageDumper;  // for FindOpenedOatFileFromOatLocation
   friend class ElfPatcher;  // for FindOpenedOatFileForDexFile & FindOpenedOatFileFromOatLocation
+  friend class JniCompilerTest;  // for GetRuntimeQuickGenericJniStub
   friend class NoDex2OatTest;  // for FindOpenedOatFileForDexFile
   friend class NoPatchoatTest;  // for FindOpenedOatFileForDexFile
-  FRIEND_TEST(ClassLinkerTest, ClassRootDescriptors);
-  FRIEND_TEST(mirror::DexCacheTest, Open);
-  FRIEND_TEST(ExceptionTest, FindExceptionHandler);
-  FRIEND_TEST(ObjectTest, AllocObjectArray);
+  ART_FRIEND_TEST(mirror::DexCacheTest, Open);  // for AllocDexCache
+
   DISALLOW_COPY_AND_ASSIGN(ClassLinker);
 };
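Since GetClassRootDescriptor is now a static lookup, a quick usage sketch (the return values follow from the enum and descriptor table in this patch, which the COMPILE_ASSERT keeps in sync):

const char* s = ClassLinker::GetClassRootDescriptor(ClassLinker::kJavaLangString);
// s == "Ljava/lang/String;"
const char* a = ClassLinker::GetClassRootDescriptor(ClassLinker::kJavaLangStringArrayClass);
// a == "[Ljava/lang/String;"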
 
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 971ff89..b676c62 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -617,7 +617,7 @@
   VLOG(jdwp) << "ParseJdwpOptions: " << options;
 
   std::vector<std::string> pairs;
-  Split(options, ',', pairs);
+  Split(options, ',', &pairs);
 
   for (size_t i = 0; i < pairs.size(); ++i) {
     std::string::size_type equals = pairs[i].find('=');
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index ce34993..c46d887 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -183,59 +183,6 @@
 bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-// Entry point for deoptimization.
-extern "C" void art_quick_deoptimize();
-static inline uintptr_t GetQuickDeoptimizationEntryPoint() {
-  return reinterpret_cast<uintptr_t>(art_quick_deoptimize);
-}
-
-// Return address of instrumentation stub.
-extern "C" void art_quick_instrumentation_entry(void*);
-static inline void* GetQuickInstrumentationEntryPoint() {
-  return reinterpret_cast<void*>(art_quick_instrumentation_entry);
-}
-
-// The return_pc of instrumentation exit stub.
-extern "C" void art_quick_instrumentation_exit();
-static inline uintptr_t GetQuickInstrumentationExitPc() {
-  return reinterpret_cast<uintptr_t>(art_quick_instrumentation_exit);
-}
-
-extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
-static inline const void* GetPortableToInterpreterBridge() {
-  return reinterpret_cast<void*>(art_portable_to_interpreter_bridge);
-}
-
-static inline const void* GetPortableToQuickBridge() {
-  // TODO: portable to quick bridge. Bug: 8196384
-  return GetPortableToInterpreterBridge();
-}
-
-extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
-static inline const void* GetQuickToInterpreterBridge() {
-  return reinterpret_cast<void*>(art_quick_to_interpreter_bridge);
-}
-
-static inline const void* GetQuickToPortableBridge() {
-  // TODO: quick to portable bridge. Bug: 8196384
-  return GetQuickToInterpreterBridge();
-}
-
-extern "C" void art_portable_proxy_invoke_handler();
-static inline const void* GetPortableProxyInvokeHandler() {
-  return reinterpret_cast<void*>(art_portable_proxy_invoke_handler);
-}
-
-extern "C" void art_quick_proxy_invoke_handler();
-static inline const void* GetQuickProxyInvokeHandler() {
-  return reinterpret_cast<void*>(art_quick_proxy_invoke_handler);
-}
-
-extern "C" void* art_jni_dlsym_lookup_stub(JNIEnv*, jobject);
-static inline void* GetJniDlsymLookupStub() {
-  return reinterpret_cast<void*>(art_jni_dlsym_lookup_stub);
-}
-
 template <typename INT_TYPE, typename FLOAT_TYPE>
 static inline INT_TYPE art_float_to_integral(FLOAT_TYPE f);
 
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index b617636..908d3cd 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -25,7 +25,7 @@
 namespace art {
 
 // TODO: Make the MethodHelper here be compaction safe.
-extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
+extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper* mh,
                                                    const DexFile::CodeItem* code_item,
                                                    ShadowFrame* shadow_frame, JValue* result) {
   mirror::ArtMethod* method = shadow_frame->GetMethod();
@@ -54,7 +54,7 @@
   } else {
     method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
                    (shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
-                   result, mh.GetShorty());
+                   result, mh->GetShorty());
   }
 }
 
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.h b/runtime/entrypoints/interpreter/interpreter_entrypoints.h
index d8b2204..5d646e9 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.h
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.h
@@ -33,10 +33,10 @@
 
 // Pointers to functions that are called by interpreter trampolines via thread-local storage.
 struct PACKED(4) InterpreterEntryPoints {
-  void (*pInterpreterToInterpreterBridge)(Thread* self, MethodHelper& mh,
+  void (*pInterpreterToInterpreterBridge)(Thread* self, MethodHelper* mh,
                                           const DexFile::CodeItem* code_item,
                                           ShadowFrame* shadow_frame, JValue* result);
-  void (*pInterpreterToCompiledCodeBridge)(Thread* self, MethodHelper& mh,
+  void (*pInterpreterToCompiledCodeBridge)(Thread* self, MethodHelper* mh,
                                            const DexFile::CodeItem* code_item,
                                            ShadowFrame* shadow_frame, JValue* result);
 };
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index edb3b72..2752407 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -45,7 +45,7 @@
     return NULL;
   } else {
     // Register so that future calls don't come here
-    method->RegisterNative(self, native_code, false);
+    method->RegisterNative(native_code, false);
     return native_code;
   }
 }
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index 642c94a..c3664bf 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -19,6 +19,7 @@
 
 #include "dex_instruction-inl.h"
 #include "entrypoints/entrypoint_utils-inl.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
 #include "interpreter/interpreter.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/object-inl.h"
@@ -222,7 +223,7 @@
       }
     }
 
-    JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
+    JValue result = interpreter::EnterInterpreterFromEntryPoint(self, &mh, code_item, shadow_frame);
     // Pop transition.
     self->PopManagedStackFragment(fragment);
     return result.GetJ();
@@ -323,7 +324,7 @@
   uint32_t dex_pc;
   mirror::ArtMethod* caller = self->GetCurrentMethod(&dex_pc);
 
-  ClassLinker* linker = Runtime::Current()->GetClassLinker();
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   InvokeType invoke_type;
   bool is_range;
   if (called->IsRuntimeMethod()) {
@@ -379,7 +380,7 @@
         is_range = true;
     }
     uint32_t dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
-    called = linker->ResolveMethod(Thread::Current(), dex_method_idx, &caller, invoke_type);
+    called = class_linker->ResolveMethod(Thread::Current(), dex_method_idx, &caller, invoke_type);
     // Incompatible class change should have been handled in resolve method.
     CHECK(!called->CheckIncompatibleClassChange(invoke_type));
     // Refine called method based on receiver.
@@ -399,27 +400,27 @@
     // Ensure that the called method's class is initialized.
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
-    linker->EnsureInitialized(self, called_class, true, true);
+    class_linker->EnsureInitialized(self, called_class, true, true);
     if (LIKELY(called_class->IsInitialized())) {
       code = called->GetEntryPointFromPortableCompiledCode();
       // TODO: remove this after we solve the link issue.
       if (code == nullptr) {
         bool have_portable_code;
-        code = linker->GetPortableOatCodeFor(called, &have_portable_code);
+        code = class_linker->GetPortableOatCodeFor(called, &have_portable_code);
       }
     } else if (called_class->IsInitializing()) {
       if (invoke_type == kStatic) {
         // Class is still initializing, go to oat and grab code (trampoline must be left in place
         // until class is initialized to stop races between threads).
         bool have_portable_code;
-        code = linker->GetPortableOatCodeFor(called, &have_portable_code);
+        code = class_linker->GetPortableOatCodeFor(called, &have_portable_code);
       } else {
         // No trampoline for non-static methods.
         code = called->GetEntryPointFromPortableCompiledCode();
         // TODO: remove this after we solve the link issue.
         if (code == nullptr) {
           bool have_portable_code;
-          code = linker->GetPortableOatCodeFor(called, &have_portable_code);
+          code = class_linker->GetPortableOatCodeFor(called, &have_portable_code);
         }
       }
     } else {
@@ -430,7 +431,7 @@
     // Expect class to at least be initializing.
     DCHECK(called->GetDeclaringClass()->IsInitializing());
     // Don't want infinite recursion.
-    DCHECK(code != linker->GetPortableResolutionTrampoline());
+    DCHECK(!class_linker->IsPortableResolutionStub(code));
     // Set up entry into main method
     *called_addr = called;
   }
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 42ace40..bb0e5e3 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -15,6 +15,7 @@
  */
 
 #include "callee_save_frame.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
 #include "instruction_set.h"
 #include "instrumentation.h"
 #include "mirror/art_method-inl.h"
@@ -38,8 +39,7 @@
   } else {
     result = instrumentation->GetQuickCodeFor(method);
   }
-  DCHECK((result != Runtime::Current()->GetClassLinker()->GetQuickToInterpreterBridgeTrampoline())
-         || !Runtime::Current()->GetHeap()->HasImageSpace());
+  DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(result));
   bool interpreter_entry = (result == GetQuickToInterpreterBridge());
   instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? nullptr : this_object,
                                                  method, lr, interpreter_entry);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 96903db..224756b 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -19,6 +19,7 @@
 #include "dex_file-inl.h"
 #include "dex_instruction-inl.h"
 #include "entrypoints/entrypoint_utils-inl.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/accounting/card_table-inl.h"
 #include "instruction_set.h"
 #include "interpreter/interpreter.h"
@@ -504,7 +505,7 @@
         return 0;
       }
     }
-    JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
+    JValue result = interpreter::EnterInterpreterFromEntryPoint(self, &mh, code_item, shadow_frame);
     // Pop transition.
     self->PopManagedStackFragment(fragment);
     // No need to restore the args since the method has already been run by the interpreter.
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 02b8a5b..41af88e 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -55,9 +55,10 @@
       NO_THREAD_SAFETY_ANALYSIS {
     mirror::ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
     QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
-    EXPECT_EQ(save_method->GetReturnPcOffsetInBytes(), pc_offset) << "Expected and real pc offset"
-        " differs for " << type << " core spills=" << std::hex << frame_info.CoreSpillMask() <<
-        " fp spills=" << frame_info.FpSpillMask() << std::dec << " ISA " << isa;
+    EXPECT_EQ(save_method->GetReturnPcOffset().SizeValue(), pc_offset)
+        << "Expected and real pc offset differs for " << type
+        << " core spills=" << std::hex << frame_info.CoreSpillMask()
+        << " fp spills=" << frame_info.FpSpillMask() << std::dec << " ISA " << isa;
   }
 };
 
diff --git a/runtime/entrypoints/runtime_asm_entrypoints.h b/runtime/entrypoints/runtime_asm_entrypoints.h
new file mode 100644
index 0000000..db36a73
--- /dev/null
+++ b/runtime/entrypoints/runtime_asm_entrypoints.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_RUNTIME_ASM_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_RUNTIME_ASM_ENTRYPOINTS_H_
+
+namespace art {
+
+#ifndef BUILDING_LIBART
+#error "File and symbols only for use within libart."
+#endif
+
+extern "C" void* art_jni_dlsym_lookup_stub(JNIEnv*, jobject);
+static inline const void* GetJniDlsymLookupStub() {
+  return reinterpret_cast<const void*>(art_jni_dlsym_lookup_stub);
+}
+
+// Return the address of portable stub code for handling IMT conflicts.
+extern "C" void art_portable_imt_conflict_trampoline(mirror::ArtMethod*);
+static inline const void* GetPortableImtConflictStub() {
+  return reinterpret_cast<const void*>(art_portable_imt_conflict_trampoline);
+}
+
+// Return the address of quick stub code for handling IMT conflicts.
+extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
+static inline const void* GetQuickImtConflictStub() {
+  return reinterpret_cast<const void*>(art_quick_imt_conflict_trampoline);
+}
+
+// Return the address of portable stub code for bridging from portable code to the interpreter.
+extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
+static inline const void* GetPortableToInterpreterBridge() {
+  return reinterpret_cast<const void*>(art_portable_to_interpreter_bridge);
+}
+
+// Return the address of quick stub code for bridging from quick code to the interpreter.
+extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
+static inline const void* GetQuickToInterpreterBridge() {
+  return reinterpret_cast<const void*>(art_quick_to_interpreter_bridge);
+}
+
+// Return the address of portable stub code for bridging from portable code to quick.
+static inline const void* GetPortableToQuickBridge() {
+  // TODO: portable to quick bridge. Bug: 8196384
+  return GetPortableToInterpreterBridge();
+}
+
+// Return the address of quick stub code for bridging from quick code to portable.
+static inline const void* GetQuickToPortableBridge() {
+  // TODO: quick to portable bridge. Bug: 8196384
+  return GetQuickToInterpreterBridge();
+}
+
+// Return the address of quick stub code for handling JNI calls.
+extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
+static inline const void* GetQuickGenericJniStub() {
+  return reinterpret_cast<const void*>(art_quick_generic_jni_trampoline);
+}
+
+// Return the address of portable stub code for handling transitions into the proxy invoke handler.
+extern "C" void art_portable_proxy_invoke_handler();
+static inline const void* GetPortableProxyInvokeHandler() {
+  return reinterpret_cast<const void*>(art_portable_proxy_invoke_handler);
+}
+
+// Return the address of quick stub code for handling transitions into the proxy invoke handler.
+extern "C" void art_quick_proxy_invoke_handler();
+static inline const void* GetQuickProxyInvokeHandler() {
+  return reinterpret_cast<const void*>(art_quick_proxy_invoke_handler);
+}
+
+// Return the address of portable stub code for resolving a method at first call.
+extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
+static inline const void* GetPortableResolutionStub() {
+  return reinterpret_cast<const void*>(art_portable_resolution_trampoline);
+}
+
+// Return the address of quick stub code for resolving a method at first call.
+extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
+static inline const void* GetQuickResolutionStub() {
+  return reinterpret_cast<const void*>(art_quick_resolution_trampoline);
+}
+
+// Entry point for quick code that performs deoptimization.
+extern "C" void art_quick_deoptimize();
+static inline const void* GetQuickDeoptimizationEntryPoint() {
+  return reinterpret_cast<const void*>(art_quick_deoptimize);
+}
+
+// Return the address of the instrumentation entry point used by non-interpreter-based tracing.
+extern "C" void art_quick_instrumentation_entry(void*);
+static inline const void* GetQuickInstrumentationEntryPoint() {
+  return reinterpret_cast<const void*>(art_quick_instrumentation_entry);
+}
+
+// The return_pc of the instrumentation exit stub.
+extern "C" void art_quick_instrumentation_exit();
+static inline const void* GetQuickInstrumentationExitPc() {
+  return reinterpret_cast<const void*>(art_quick_instrumentation_exit);
+}
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_ENTRYPOINTS_RUNTIME_ASM_ENTRYPOINTS_H_
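A small usage sketch consistent with the call sites elsewhere in this patch: translation units inside libart include this header and go through the accessors rather than taking the address of the asm symbols directly (PointsAtInterpreterBridge is a hypothetical helper).

#include "entrypoints/runtime_asm_entrypoints.h"

static bool PointsAtInterpreterBridge(mirror::ArtMethod* method) {
  // Only matches the statically linked bridge; a boot image may carry its own
  // copy, which is what ClassLinker::IsQuickToInterpreterBridge also checks.
  return method->GetEntryPointFromQuickCompiledCode() == GetQuickToInterpreterBridge();
}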
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 3a17eca..1714134 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -175,7 +175,7 @@
     fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
     fake_stack.push_back(0);
     fake_stack.push_back(0);
-    fake_stack.push_back(method_f_->ToNativePc(dex_pc));  // return pc
+    fake_stack.push_back(method_f_->ToNativeQuickPc(dex_pc));  // return pc
 
     // Create/push fake 16byte stack frame for method f
     fake_stack.push_back(reinterpret_cast<uintptr_t>(method_f_));
@@ -194,7 +194,7 @@
     // Set up thread to appear as if we called out of method_g_ at pc dex 3
     thread->SetTopOfStack(
         reinterpret_cast<StackReference<mirror::ArtMethod>*>(&fake_stack[0]),
-        method_g_->ToNativePc(dex_pc));  // return pc
+        method_g_->ToNativeQuickPc(dex_pc));  // return pc
   } else {
     // Create/push fake 20-byte shadow frame for method g
     fake_stack.push_back(0);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index c09dca8..ba85c55 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -32,7 +32,6 @@
 #include "gc/collector_type.h"
 #include "gc/space/large_object_space.h"
 #include "globals.h"
-#include "gtest/gtest.h"
 #include "instruction_set.h"
 #include "jni.h"
 #include "object_callbacks.h"
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index dbf4abc..4ef8478 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -24,7 +24,6 @@
 #include "atomic.h"
 #include "base/timing_logger.h"
 #include "globals.h"
-#include "gtest/gtest.h"
 #include "jni.h"
 #include "object_callbacks.h"
 #include "offsets.h"
@@ -45,44 +44,56 @@
 class ReferenceQueue {
  public:
   explicit ReferenceQueue(Mutex* lock);
+
   // Enqueue a reference if is not already enqueued. Thread safe to call from multiple threads
   // since it uses a lock to avoid a race between checking for the references presence and adding
   // it.
   void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+
   // Enqueue a reference, unlike EnqueuePendingReference, enqueue reference checks that the
   // reference IsEnqueueable. Not thread safe, used when mutators are paused to minimize lock
   // overhead.
   void EnqueueReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   void EnqueuePendingReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   mirror::Reference* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Enqueues finalizer references with white referents.  White referents are blackened, moved to the
   // zombie field, and the referent field is cleared.
   void EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                   IsHeapReferenceMarkedCallback* is_marked_callback,
                                   MarkObjectCallback* mark_object_callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Walks the reference list marking any references subject to the reference clearing policy.
   // References with a black referent are removed from the list.  References with white referents
   // biased toward saving are blackened and also removed from the list.
   void ForwardSoftReferences(IsHeapReferenceMarkedCallback* preserve_callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Unlink the reference list clearing references objects with white referents.  Cleared references
   // registered to a reference queue are scheduled for appending by the heap worker thread.
   void ClearWhiteReferences(ReferenceQueue* cleared_references,
                             IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   void Dump(std::ostream& os) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   bool IsEmpty() const {
     return list_ == nullptr;
   }
+
   void Clear() {
     list_ = nullptr;
   }
+
   mirror::Reference* GetList() {
     return list_;
   }
+
   // Visits list_, currently only used for the mark compact GC.
   void UpdateRoots(IsMarkedCallback* callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -90,10 +101,13 @@
  private:
   // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
   // calling AtomicEnqueueIfNotEnqueued.
-  Mutex* lock_;
+  Mutex* const lock_;
+
   // The actual reference list. Only a root for the mark compact GC since it will be null for other
   // GC types.
   mirror::Reference* list_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReferenceQueue);
 };
 
 }  // namespace gc
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 452af90..39d82cc 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -118,7 +118,7 @@
                           std::string* error_msg) {
   const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
   std::vector<std::string> boot_class_path;
-  Split(boot_class_path_string, ':', boot_class_path);
+  Split(boot_class_path_string, ':', &boot_class_path);
   if (boot_class_path.empty()) {
     *error_msg = "Failed to generate image because no boot class path specified";
     return false;
diff --git a/runtime/instruction_set.cc b/runtime/instruction_set.cc
index 644e055..b5f8571 100644
--- a/runtime/instruction_set.cc
+++ b/runtime/instruction_set.cc
@@ -16,6 +16,12 @@
 
 #include "instruction_set.h"
 
+#include <fstream>
+
+#include "base/casts.h"
+#include "base/stringprintf.h"
+#include "utils.h"
+
 namespace art {
 
 const char* GetInstructionSetString(const InstructionSet isa) {
@@ -35,7 +41,7 @@
       return "none";
     default:
       LOG(FATAL) << "Unknown ISA " << isa;
-      return nullptr;
+      UNREACHABLE();
   }
 }
 
@@ -117,15 +123,385 @@
   }
 }
 
-std::string InstructionSetFeatures::GetFeatureString() const {
-  std::string result;
-  if ((mask_ & kHwDiv) != 0) {
-    result += "div";
+const InstructionSetFeatures* InstructionSetFeatures::FromVariant(InstructionSet isa,
+                                                                  const std::string& variant,
+                                                                  std::string* error_msg) {
+  const InstructionSetFeatures* result;
+  switch (isa) {
+    case kArm:
+    case kThumb2:
+      result = ArmInstructionSetFeatures::FromVariant(variant, error_msg);
+      break;
+    default:
+      result = UnknownInstructionSetFeatures::Unknown(isa);
+      break;
   }
-  if (result.size() == 0) {
-    result = "none";
+  CHECK_EQ(result == nullptr, error_msg->size() != 0);
+  return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::FromFeatureString(InstructionSet isa,
+                                                                        const std::string& feature_list,
+                                                                        std::string* error_msg) {
+  const InstructionSetFeatures* result;
+  switch (isa) {
+    case kArm:
+    case kThumb2:
+      result = ArmInstructionSetFeatures::FromFeatureString(feature_list, error_msg);
+      break;
+    default:
+      result = UnknownInstructionSetFeatures::Unknown(isa);
+      break;
+  }
+  // TODO: warn if feature_list doesn't agree with result's GetFeatureString().
+  CHECK_EQ(result == nullptr, error_msg->size() != 0);
+  return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::FromBitmap(InstructionSet isa,
+                                                                 uint32_t bitmap) {
+  const InstructionSetFeatures* result;
+  switch (isa) {
+    case kArm:
+    case kThumb2:
+      result = ArmInstructionSetFeatures::FromBitmap(bitmap);
+      break;
+    default:
+      result = UnknownInstructionSetFeatures::Unknown(isa);
+      break;
+  }
+  CHECK_EQ(bitmap, result->AsBitmap());
+  return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::FromCppDefines() {
+  const InstructionSetFeatures* result;
+  switch (kRuntimeISA) {
+    case kArm:
+    case kThumb2:
+      result = ArmInstructionSetFeatures::FromCppDefines();
+      break;
+    default:
+      result = UnknownInstructionSetFeatures::Unknown(kRuntimeISA);
+      break;
   }
   return result;
 }
 
+
+const InstructionSetFeatures* InstructionSetFeatures::FromCpuInfo() {
+  const InstructionSetFeatures* result;
+  switch (kRuntimeISA) {
+    case kArm:
+    case kThumb2:
+      result = ArmInstructionSetFeatures::FromCpuInfo();
+      break;
+    default:
+      result = UnknownInstructionSetFeatures::Unknown(kRuntimeISA);
+      break;
+  }
+  return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::FromHwcap() {
+  const InstructionSetFeatures* result;
+  switch (kRuntimeISA) {
+    case kArm:
+    case kThumb2:
+      result = ArmInstructionSetFeatures::FromHwcap();
+      break;
+    default:
+      result = UnknownInstructionSetFeatures::Unknown(kRuntimeISA);
+      break;
+  }
+  return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::FromAssembly() {
+  const InstructionSetFeatures* result;
+  switch (kRuntimeISA) {
+    case kArm:
+    case kThumb2:
+      result = ArmInstructionSetFeatures::FromAssembly();
+      break;
+    default:
+      result = UnknownInstructionSetFeatures::Unknown(kRuntimeISA);
+      break;
+  }
+  return result;
+}
+
+const ArmInstructionSetFeatures* InstructionSetFeatures::AsArmInstructionSetFeatures() const {
+  DCHECK_EQ(kArm, GetInstructionSet());
+  return down_cast<const ArmInstructionSetFeatures*>(this);
+}
+
+std::ostream& operator<<(std::ostream& os, const InstructionSetFeatures& rhs) {
+  os << "ISA: " << rhs.GetInstructionSet() << " Feature string: " << rhs.GetFeatureString();
+  return os;
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromFeatureString(
+    const std::string& feature_list, std::string* error_msg) {
+  std::vector<std::string> features;
+  Split(feature_list, ',', &features);
+  bool has_lpae = false;
+  bool has_div = false;
+  for (auto i = features.begin(); i != features.end(); i++) {
+    std::string feature = Trim(*i);
+    if (feature == "default" || feature == "none") {
+      // Nothing to do.
+    } else if (feature == "div") {
+      has_div = true;
+    } else if (feature == "nodiv") {
+      has_div = false;
+    } else if (feature == "lpae") {
+      has_lpae = true;
+    } else if (feature == "nolpae") {
+      has_lpae = false;
+    } else {
+      *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
+      return nullptr;
+    }
+  }
+  return new ArmInstructionSetFeatures(has_lpae, has_div);
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromVariant(
+    const std::string& variant, std::string* error_msg) {
+  // Look for variants that have divide support.
+  bool has_div = false;
+  {
+    static const char* arm_variants_with_div[] = {
+        "cortex-a7", "cortex-a12", "cortex-a15", "cortex-a17", "cortex-a53", "cortex-a57",
+        "cortex-m3", "cortex-m4", "cortex-r4", "cortex-r5",
+        "cyclone", "denver", "krait", "swift"
+    };
+    for (const char* div_variant : arm_variants_with_div) {
+      if (variant == div_variant) {
+        has_div = true;
+        break;
+      }
+    }
+  }
+  // Look for variants that have LPAE support.
+  bool has_lpae = false;
+  {
+    static const char* arm_variants_with_lpae[] = {
+        "cortex-a7", "cortex-a15", "krait", "denver"
+    };
+    for (const char* lpae_variant : arm_variants_with_lpae) {
+      if (variant == lpae_variant) {
+        has_lpae = true;
+        break;
+      }
+    }
+  }
+  if (has_div == false && has_lpae == false) {
+    // Avoid unsupported variants.
+    static const char* unsupported_arm_variants[] = {
+        // ARM processors that aren't ARMv7 compatible aren't supported.
+        "arm2", "arm250", "arm3", "arm6", "arm60", "arm600", "arm610", "arm620",
+        "cortex-m0", "cortex-m0plus", "cortex-m1",
+        "fa526", "fa626", "fa606te", "fa626te", "fmp626", "fa726te",
+        "iwmmxt", "iwmmxt2",
+        "strongarm", "strongarm110", "strongarm1100", "strongarm1110",
+        "xscale"
+    };
+    for (const char* us_variant : unsupported_arm_variants) {
+      if (variant == us_variant) {
+        *error_msg = StringPrintf("Attempt to use unsupported ARM variant: %s", us_variant);
+        return nullptr;
+      }
+    }
+    // Warn if the variant is unknown.
+    // TODO: some of the variants below may have feature support, but that support is currently
+    //       unknown so we'll choose conservative (sub-optimal) defaults without warning.
+    // TODO: some of the architectures may not support all features required by ART and should be
+    //       moved to unsupported_arm_variants[] above.
+    static const char* arm_variants_without_known_features[] = {
+        "arm7", "arm7m", "arm7d", "arm7dm", "arm7di", "arm7dmi", "arm70", "arm700", "arm700i",
+        "arm710", "arm710c", "arm7100", "arm720", "arm7500", "arm7500fe", "arm7tdmi", "arm7tdmi-s",
+        "arm710t", "arm720t", "arm740t",
+        "arm8", "arm810",
+        "arm9", "arm9e", "arm920", "arm920t", "arm922t", "arm946e-s", "arm966e-s", "arm968e-s",
+        "arm926ej-s", "arm940t", "arm9tdmi",
+        "arm10tdmi", "arm1020t", "arm1026ej-s", "arm10e", "arm1020e", "arm1022e",
+        "arm1136j-s", "arm1136jf-s",
+        "arm1156t2-s", "arm1156t2f-s", "arm1176jz-s", "arm1176jzf-s",
+        "cortex-a5", "cortex-a8", "cortex-a9", "cortex-a9-mp", "cortex-r4f",
+        "marvell-pj4", "mpcore", "mpcorenovfp"
+    };
+    bool found = false;
+    for (const char* ff_variant : arm_variants_without_known_features) {
+      if (variant == ff_variant) {
+        found = true;
+        break;
+      }
+    }
+    if (!found) {
+      LOG(WARNING) << "Unknown instruction set features for ARM CPU variant (" << variant
+          << ") using conservative defaults";
+    }
+  }
+  return new ArmInstructionSetFeatures(has_lpae, has_div);
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+  bool has_lpae = (bitmap & kLpaeBitfield) != 0;
+  bool has_div = (bitmap & kDivBitfield) != 0;
+  return new ArmInstructionSetFeatures(has_lpae, has_div);
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCppDefines() {
+#if defined(__ARM_ARCH_EXT_IDIV__)
+  bool has_div = true;
+#else
+  bool has_div = false;
+#endif
+#if defined(__ARM_FEATURE_LPAE)
+  bool has_lpae = true;
+#else
+  bool has_lpae = false;
+#endif
+  return new ArmInstructionSetFeatures(has_lpae, has_div);
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCpuInfo() {
+  // Look in /proc/cpuinfo for features we need.  Only use this when we can guarantee that
+  // the kernel puts the appropriate feature flags in here.  Sometimes it doesn't.
+  bool has_lpae = false;
+  bool has_div = false;
+
+  std::ifstream in("/proc/cpuinfo");
+  if (!in.fail()) {
+    while (!in.eof()) {
+      std::string line;
+      std::getline(in, line);
+      if (!in.eof()) {
+        LOG(INFO) << "cpuinfo line: " << line;
+        if (line.find("Features") != std::string::npos) {
+          LOG(INFO) << "found features";
+          if (line.find("idivt") != std::string::npos) {
+            // We always expect both ARM and Thumb divide instructions to be available or not
+            // available.
+            CHECK_NE(line.find("idiva"), std::string::npos);
+            has_div = true;
+          }
+          if (line.find("lpae") != std::string::npos) {
+            has_lpae = true;
+          }
+        }
+      }
+    }
+    in.close();
+  } else {
+    LOG(INFO) << "Failed to open /proc/cpuinfo";
+  }
+  return new ArmInstructionSetFeatures(has_lpae, has_div);
+}
+
+#if defined(HAVE_ANDROID_OS) && defined(__arm__)
+#include <sys/auxv.h>
+#include <asm/hwcap.h>
+#endif
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromHwcap() {
+  bool has_lpae = false;
+  bool has_div = false;
+
+#if defined(HAVE_ANDROID_OS) && defined(__arm__)
+  uint64_t hwcaps = getauxval(AT_HWCAP);
+  LOG(INFO) << "hwcaps=" << hwcaps;
+  if ((hwcaps & HWCAP_IDIVT) != 0) {
+    // We always expect both ARM and Thumb divide instructions to be available or not
+    // available.
+    CHECK_NE(hwcaps & HWCAP_IDIVA, 0U);
+    has_div = true;
+  }
+  if ((hwcaps & HWCAP_LPAE) != 0) {
+    has_lpae = true;
+  }
+#endif
+
+  return new ArmInstructionSetFeatures(has_lpae, has_div);
+}
+
+// A signal handler invoked when an illegal instruction faults.  We record the failure in r0
+// and then advance the PC in the signal context past the offending instruction.  We know the
+// instruction is an sdiv (4 bytes long).
+static void bad_divide_inst_handle(int signo, siginfo *si, void *data) {
+  UNUSED(signo);
+  UNUSED(si);
+#if defined(__arm__)
+  struct ucontext *uc = (struct ucontext *)data;
+  struct sigcontext *sc = &uc->uc_mcontext;
+  sc->arm_r0 = 0;     // Set R0 to #0 to signal error.
+  sc->arm_pc += 4;    // Skip offending instruction.
+#else
+  UNUSED(data);
+#endif
+}
+
+#if defined(__arm__)
+extern "C" bool artCheckForARMSDIVInstruction();
+#endif
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromAssembly() {
+  // See if have a sdiv instruction.  Register a signal handler and try to execute an sdiv
+  // instruction.  If we get a SIGILL then it's not supported.
+  struct sigaction sa, osa;
+  sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
+  sa.sa_sigaction = bad_divide_inst_handle;
+  sigaction(SIGILL, &sa, &osa);
+
+  bool has_div = false;
+#if defined(__arm__)
+  if (artCheckForARMSDIVInstruction()) {
+    has_div = true;
+  }
+#endif
+
+  // Restore the signal handler.
+  sigaction(SIGILL, &osa, nullptr);
+
+  // Use compile time features to "detect" LPAE support.
+  // TODO: write an assembly LPAE support test.
+#if defined(__ARM_FEATURE_LPAE)
+  bool has_lpae = true;
+#else
+  bool has_lpae = false;
+#endif
+  return new ArmInstructionSetFeatures(has_lpae, has_div);
+}
+
+bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
+  if (kArm != other->GetInstructionSet()) {
+    return false;
+  }
+  const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures();
+  return has_lpae_ == other_as_arm->has_lpae_ && has_div_ == other_as_arm->has_div_;
+}
+
+uint32_t ArmInstructionSetFeatures::AsBitmap() const {
+  return (has_lpae_ ? kLpaeBitfield : 0) | (has_div_ ? kDivBitfield : 0);
+}
+
+std::string ArmInstructionSetFeatures::GetFeatureString() const {
+  std::string result;
+  if (has_div_) {
+    result += ",div";
+  }
+  if (has_lpae_) {
+    result += ",lpae";
+  }
+  if (result.size() == 0) {
+    return "none";
+  } else {
+    // Strip leading comma.
+    return result.substr(1, result.size());
+  }
+}
+
 }  // namespace art
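
As a usage sketch (not part of the patch itself), the new factory functions compose as follows; this assumes the usual ART headers and namespace, and "krait" is only an illustrative variant name:

  // Build features for a named ARM variant, print them, and round-trip them
  // through the bitmap encoding.
  std::string error_msg;
  std::unique_ptr<const InstructionSetFeatures> features(
      InstructionSetFeatures::FromVariant(kArm, "krait", &error_msg));
  if (features.get() == nullptr) {
    LOG(ERROR) << error_msg;
  } else {
    LOG(INFO) << *features.get();  // e.g. "ISA: arm Feature string: div,lpae"
    std::unique_ptr<const InstructionSetFeatures> from_bitmap(
        InstructionSetFeatures::FromBitmap(kArm, features->AsBitmap()));
    CHECK(from_bitmap->Equals(features.get()));
  }
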
diff --git a/runtime/instruction_set.h b/runtime/instruction_set.h
index de6d0f4..529fa0c 100644
--- a/runtime/instruction_set.h
+++ b/runtime/instruction_set.h
@@ -22,6 +22,7 @@
 
 #include "base/logging.h"  // Logging is required for FATAL in the helper functions.
 #include "base/macros.h"
+#include "base/value_object.h"
 #include "globals.h"       // For KB.
 
 namespace art {
@@ -177,53 +178,163 @@
 
 size_t GetStackOverflowReservedBytes(InstructionSet isa);
 
-enum InstructionFeatures {
-  kHwDiv  = 0x1,              // Supports hardware divide.
-  kHwLpae = 0x2,              // Supports Large Physical Address Extension.
-};
+class ArmInstructionSetFeatures;
 
-// This is a bitmask of supported features per architecture.
-class PACKED(4) InstructionSetFeatures {
+// Abstraction used to describe the features of different instruction sets.
+class InstructionSetFeatures {
  public:
-  InstructionSetFeatures() : mask_(0) {}
-  explicit InstructionSetFeatures(uint32_t mask) : mask_(mask) {}
+  // Process a CPU variant string for the given ISA and create an InstructionSetFeatures.
+  static const InstructionSetFeatures* FromVariant(InstructionSet isa,
+                                                   const std::string& variant,
+                                                   std::string* error_msg);
 
-  static InstructionSetFeatures GuessInstructionSetFeatures();
+  // Parse a string of the form "div,lpae" and create an InstructionSetFeatures.
+  static const InstructionSetFeatures* FromFeatureString(InstructionSet isa,
+                                                         const std::string& feature_list,
+                                                         std::string* error_msg);
 
-  bool HasDivideInstruction() const {
-      return (mask_ & kHwDiv) != 0;
-  }
+  // Parse a bitmap for the given ISA and create an InstructionSetFeatures.
+  static const InstructionSetFeatures* FromBitmap(InstructionSet isa, uint32_t bitmap);
 
-  void SetHasDivideInstruction(bool v) {
-    mask_ = (mask_ & ~kHwDiv) | (v ? kHwDiv : 0);
-  }
+  // Turn C pre-processor #defines into the equivalent instruction set features for kRuntimeISA.
+  static const InstructionSetFeatures* FromCppDefines();
 
-  bool HasLpae() const {
-    return (mask_ & kHwLpae) != 0;
-  }
+  // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
+  static const InstructionSetFeatures* FromCpuInfo();
 
-  void SetHasLpae(bool v) {
-    mask_ = (mask_ & ~kHwLpae) | (v ? kHwLpae : 0);
-  }
+  // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
+  // InstructionSetFeatures.
+  static const InstructionSetFeatures* FromHwcap();
 
-  std::string GetFeatureString() const;
+  // Use assembly tests of the current runtime (i.e. kRuntimeISA) to determine the
+  // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
+  static const InstructionSetFeatures* FromAssembly();
 
-  // Other features in here.
+  // Are these features the same as the other given features?
+  virtual bool Equals(const InstructionSetFeatures* other) const = 0;
 
-  bool operator==(const InstructionSetFeatures &peer) const {
-    return mask_ == peer.mask_;
-  }
+  // Return the ISA these features relate to.
+  virtual InstructionSet GetInstructionSet() const = 0;
 
-  bool operator!=(const InstructionSetFeatures &peer) const {
-    return mask_ != peer.mask_;
-  }
+  // Return a bitmap that represents the features. ISA specific.
+  virtual uint32_t AsBitmap() const = 0;
 
-  bool operator<=(const InstructionSetFeatures &peer) const {
-    return (mask_ & peer.mask_) == mask_;
-  }
+  // Return a string of the form "div,lpae" or "none".
+  virtual std::string GetFeatureString() const = 0;
+
+  // Down cast this to an ArmInstructionSetFeatures.
+  const ArmInstructionSetFeatures* AsArmInstructionSetFeatures() const;
+
+  virtual ~InstructionSetFeatures() {}
+
+ protected:
+  InstructionSetFeatures() {}
 
  private:
-  uint32_t mask_;
+  DISALLOW_COPY_AND_ASSIGN(InstructionSetFeatures);
+};
+std::ostream& operator<<(std::ostream& os, const InstructionSetFeatures& rhs);
+
+// Instruction set features relevant to the ARM architecture.
+class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
+ public:
+  // Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
+  static const ArmInstructionSetFeatures* FromVariant(const std::string& variant,
+                                                      std::string* error_msg);
+
+  // Parse a string of the form "div,lpae" and create an InstructionSetFeatures.
+  static const ArmInstructionSetFeatures* FromFeatureString(const std::string& feature_list,
+                                                            std::string* error_msg);
+
+  // Parse a bitmap and create an InstructionSetFeatures.
+  static const ArmInstructionSetFeatures* FromBitmap(uint32_t bitmap);
+
+  // Turn C pre-processor #defines into the equivalent instruction set features.
+  static const ArmInstructionSetFeatures* FromCppDefines();
+
+  // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
+  static const ArmInstructionSetFeatures* FromCpuInfo();
+
+  // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
+  // InstructionSetFeatures.
+  static const ArmInstructionSetFeatures* FromHwcap();
+
+  // Use assembly tests of the current runtime (i.e. kRuntimeISA) to determine the
+  // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
+  static const ArmInstructionSetFeatures* FromAssembly();
+
+  bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+
+  InstructionSet GetInstructionSet() const OVERRIDE {
+    return kArm;
+  }
+
+  uint32_t AsBitmap() const OVERRIDE;
+
+  // Return a string of the form "div,lpae" or "none".
+  std::string GetFeatureString() const OVERRIDE;
+
+  // Is the divide instruction feature enabled?
+  bool HasDivideInstruction() const {
+    return has_div_;
+  }
+
+  // Is the Large Physical Address Extension (LPAE) instruction feature enabled? When true, code
+  // can be used that assumes double register loads and stores (ldrd, strd) don't tear.
+  bool HasLpae() const {
+    return has_lpae_;
+  }
+
+  virtual ~ArmInstructionSetFeatures() {}
+
+ private:
+  ArmInstructionSetFeatures(bool has_lpae, bool has_div)
+      : has_lpae_(has_lpae), has_div_(has_div) {
+  }
+
+  // Bitmap positions for encoding features as a bitmap.
+  enum {
+    kDivBitfield = 1,
+    kLpaeBitfield = 2,
+  };
+
+  const bool has_lpae_;
+  const bool has_div_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArmInstructionSetFeatures);
+};
+
+// A class used for instruction set features on ISAs that don't yet have any features defined.
+class UnknownInstructionSetFeatures FINAL : public InstructionSetFeatures {
+ public:
+  static const UnknownInstructionSetFeatures* Unknown(InstructionSet isa) {
+    return new UnknownInstructionSetFeatures(isa);
+  }
+
+  bool Equals(const InstructionSetFeatures* other) const OVERRIDE {
+    return isa_ == other->GetInstructionSet();
+  }
+
+  InstructionSet GetInstructionSet() const OVERRIDE {
+    return isa_;
+  }
+
+  uint32_t AsBitmap() const OVERRIDE {
+    return 0;
+  }
+
+  std::string GetFeatureString() const OVERRIDE {
+    return "none";
+  }
+
+  virtual ~UnknownInstructionSetFeatures() {}
+
+ private:
+  explicit UnknownInstructionSetFeatures(InstructionSet isa) : isa_(isa) {}
+
+  const InstructionSet isa_;
+
+  DISALLOW_COPY_AND_ASSIGN(UnknownInstructionSetFeatures);
 };
 
 // The following definitions create return types for two word-sized entities that will be passed
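
A hedged sketch of how callers are expected to reach the ARM-specific queries through the generic interface (the surrounding logging is illustrative, not taken from this patch):

  // The down cast is only valid on ARM; the DCHECK inside
  // AsArmInstructionSetFeatures() catches misuse on other ISAs.
  std::unique_ptr<const InstructionSetFeatures> features(
      InstructionSetFeatures::FromCppDefines());
  if (features->GetInstructionSet() == kArm) {
    const ArmInstructionSetFeatures* arm_features = features->AsArmInstructionSetFeatures();
    LOG(INFO) << "sdiv available: " << arm_features->HasDivideInstruction()
              << ", atomic ldrd/strd (LPAE): " << arm_features->HasLpae();
  }
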
diff --git a/runtime/instruction_set_test.cc b/runtime/instruction_set_test.cc
index 80191b1..948063d 100644
--- a/runtime/instruction_set_test.cc
+++ b/runtime/instruction_set_test.cc
@@ -16,6 +16,7 @@
 
 #include "instruction_set.h"
 
+#include "base/stringprintf.h"
 #include "common_runtime_test.h"
 
 namespace art {
@@ -50,4 +51,214 @@
   EXPECT_EQ(sizeof(void*), GetInstructionSetPointerSize(kRuntimeISA));
 }
 
+TEST_F(InstructionSetTest, X86Features) {
+  // Build features for a 32-bit x86 atom processor.
+  std::string error_msg;
+  std::unique_ptr<const InstructionSetFeatures> x86_features(
+      InstructionSetFeatures::FromVariant(kX86, "atom", &error_msg));
+  ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
+  EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+  EXPECT_TRUE(x86_features->Equals(x86_features.get()));
+  EXPECT_STREQ("none", x86_features->GetFeatureString().c_str());
+  EXPECT_EQ(x86_features->AsBitmap(), 0U);
+
+  // Build features for a 32-bit x86 default processor.
+  std::unique_ptr<const InstructionSetFeatures> x86_default_features(
+      InstructionSetFeatures::FromFeatureString(kX86, "default", &error_msg));
+  ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
+  EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
+  EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
+  EXPECT_STREQ("none", x86_default_features->GetFeatureString().c_str());
+  EXPECT_EQ(x86_default_features->AsBitmap(), 0U);
+
+  // Build features for a 64-bit x86-64 atom processor.
+  std::unique_ptr<const InstructionSetFeatures> x86_64_features(
+      InstructionSetFeatures::FromVariant(kX86_64, "atom", &error_msg));
+  ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
+  EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+  EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
+  EXPECT_STREQ("none", x86_64_features->GetFeatureString().c_str());
+  EXPECT_EQ(x86_64_features->AsBitmap(), 0U);
+
+  EXPECT_FALSE(x86_64_features->Equals(x86_features.get()));
+  EXPECT_FALSE(x86_64_features->Equals(x86_default_features.get()));
+  EXPECT_TRUE(x86_features->Equals(x86_default_features.get()));
+}
+
+TEST_F(InstructionSetTest, ArmFeaturesFromVariant) {
+  // Build features for a 32-bit ARM krait processor.
+  std::string error_msg;
+  std::unique_ptr<const InstructionSetFeatures> krait_features(
+      InstructionSetFeatures::FromVariant(kArm, "krait", &error_msg));
+  ASSERT_TRUE(krait_features.get() != nullptr) << error_msg;
+
+  ASSERT_EQ(krait_features->GetInstructionSet(), kArm);
+  EXPECT_TRUE(krait_features->Equals(krait_features.get()));
+  EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+  EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasLpae());
+  EXPECT_STREQ("div,lpae", krait_features->GetFeatureString().c_str());
+  EXPECT_EQ(krait_features->AsBitmap(), 3U);
+
+  // Build features for a 32-bit ARM denver processor.
+  std::unique_ptr<const InstructionSetFeatures> denver_features(
+      InstructionSetFeatures::FromVariant(kArm, "denver", &error_msg));
+  ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
+
+  EXPECT_TRUE(denver_features->Equals(denver_features.get()));
+  EXPECT_TRUE(denver_features->Equals(krait_features.get()));
+  EXPECT_TRUE(krait_features->Equals(denver_features.get()));
+  EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+  EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasLpae());
+  EXPECT_STREQ("div,lpae", denver_features->GetFeatureString().c_str());
+  EXPECT_EQ(denver_features->AsBitmap(), 3U);
+
+  // Build features for a 32-bit ARMv7 processor.
+  std::unique_ptr<const InstructionSetFeatures> arm7_features(
+      InstructionSetFeatures::FromVariant(kArm, "arm7", &error_msg));
+  ASSERT_TRUE(arm7_features.get() != nullptr) << error_msg;
+
+  EXPECT_TRUE(arm7_features->Equals(arm7_features.get()));
+  EXPECT_FALSE(arm7_features->Equals(krait_features.get()));
+  EXPECT_FALSE(krait_features->Equals(arm7_features.get()));
+  EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+  EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasLpae());
+  EXPECT_STREQ("none", arm7_features->GetFeatureString().c_str());
+  EXPECT_EQ(arm7_features->AsBitmap(), 0U);
+
+  // ARM6 is not a supported architecture variant.
+  std::unique_ptr<const InstructionSetFeatures> arm6_features(
+      InstructionSetFeatures::FromVariant(kArm, "arm6", &error_msg));
+  EXPECT_TRUE(arm6_features.get() == nullptr);
+  EXPECT_NE(error_msg.size(), 0U);
+}
+
+TEST_F(InstructionSetTest, ArmFeaturesFromString) {
+  // Build features for a 32-bit ARM with LPAE and div processor.
+  std::string error_msg;
+  std::unique_ptr<const InstructionSetFeatures> krait_features(
+      InstructionSetFeatures::FromFeatureString(kArm, "lpae,div", &error_msg));
+  ASSERT_TRUE(krait_features.get() != nullptr) << error_msg;
+
+  ASSERT_EQ(krait_features->GetInstructionSet(), kArm);
+  EXPECT_TRUE(krait_features->Equals(krait_features.get()));
+  EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+  EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasLpae());
+  EXPECT_STREQ("div,lpae", krait_features->GetFeatureString().c_str());
+  EXPECT_EQ(krait_features->AsBitmap(), 3U);
+
+  // Build features for a 32-bit ARM processor with LPAE and div flipped.
+  std::unique_ptr<const InstructionSetFeatures> denver_features(
+      InstructionSetFeatures::FromFeatureString(kArm, "div,lpae", &error_msg));
+  ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
+
+  EXPECT_TRUE(denver_features->Equals(denver_features.get()));
+  EXPECT_TRUE(denver_features->Equals(krait_features.get()));
+  EXPECT_TRUE(krait_features->Equals(denver_features.get()));
+  EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+  EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasLpae());
+  EXPECT_STREQ("div,lpae", denver_features->GetFeatureString().c_str());
+  EXPECT_EQ(denver_features->AsBitmap(), 3U);
+
+  // Build features for a 32-bit default ARM processor.
+  std::unique_ptr<const InstructionSetFeatures> arm7_features(
+      InstructionSetFeatures::FromFeatureString(kArm, "default", &error_msg));
+  ASSERT_TRUE(arm7_features.get() != nullptr) << error_msg;
+
+  EXPECT_TRUE(arm7_features->Equals(arm7_features.get()));
+  EXPECT_FALSE(arm7_features->Equals(krait_features.get()));
+  EXPECT_FALSE(krait_features->Equals(arm7_features.get()));
+  EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+  EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasLpae());
+  EXPECT_STREQ("none", arm7_features->GetFeatureString().c_str());
+  EXPECT_EQ(arm7_features->AsBitmap(), 0U);
+}
+
+#ifdef HAVE_ANDROID_OS
+#include "cutils/properties.h"
+
+TEST_F(InstructionSetTest, FeaturesFromSystemPropertyVariant) {
+  // Take the default set of instruction features from the build.
+  std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+      InstructionSetFeatures::FromCppDefines());
+
+  // Read the features property.
+  std::string key = StringPrintf("dalvik.vm.isa.%s.variant", GetInstructionSetString(kRuntimeISA));
+  char dex2oat_isa_variant[PROPERTY_VALUE_MAX];
+  if (property_get(key.c_str(), dex2oat_isa_variant, nullptr) > 0) {
+    // Use features from property to build InstructionSetFeatures and check against build's
+    // features.
+    std::string error_msg;
+    std::unique_ptr<const InstructionSetFeatures> property_features(
+        InstructionSetFeatures::FromVariant(kRuntimeISA, dex2oat_isa_variant, &error_msg));
+    ASSERT_TRUE(property_features.get() != nullptr) << error_msg;
+
+    EXPECT_TRUE(property_features->Equals(instruction_set_features.get()))
+      << "System property features: " << *property_features.get()
+      << "\nFeatures from build: " << *instruction_set_features.get();
+  }
+}
+
+TEST_F(InstructionSetTest, FeaturesFromSystemPropertyString) {
+  // Take the default set of instruction features from the build.
+  std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+      InstructionSetFeatures::FromCppDefines());
+
+  // Read the features property.
+  std::string key = StringPrintf("dalvik.vm.isa.%s.features", GetInstructionSetString(kRuntimeISA));
+  char dex2oat_isa_features[PROPERTY_VALUE_MAX];
+  if (property_get(key.c_str(), dex2oat_isa_features, nullptr) > 0) {
+    // Use features from property to build InstructionSetFeatures and check against build's
+    // features.
+    std::string error_msg;
+    std::unique_ptr<const InstructionSetFeatures> property_features(
+        InstructionSetFeatures::FromFeatureString(kRuntimeISA, dex2oat_isa_features, &error_msg));
+    ASSERT_TRUE(property_features.get() != nullptr) << error_msg;
+
+    EXPECT_TRUE(property_features->Equals(instruction_set_features.get()))
+      << "System property features: " << *property_features.get()
+      << "\nFeatures from build: " << *instruction_set_features.get();
+  }
+}
+#endif
+
+TEST_F(InstructionSetTest, FeaturesFromCpuInfo) {
+  // Take the default set of instruction features from the build.
+  std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+      InstructionSetFeatures::FromCppDefines());
+
+  // Check we get the same instruction set features using /proc/cpuinfo.
+  std::unique_ptr<const InstructionSetFeatures> cpuinfo_features(
+      InstructionSetFeatures::FromCpuInfo());
+  EXPECT_TRUE(cpuinfo_features->Equals(instruction_set_features.get()))
+      << "CPU Info features: " << *cpuinfo_features.get()
+      << "\nFeatures from build: " << *instruction_set_features.get();
+}
+
+TEST_F(InstructionSetTest, FeaturesFromHwcap) {
+  // Take the default set of instruction features from the build.
+  std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+      InstructionSetFeatures::FromCppDefines());
+
+  // Check we get the same instruction set features using AT_HWCAP.
+  std::unique_ptr<const InstructionSetFeatures> hwcap_features(
+      InstructionSetFeatures::FromHwcap());
+  EXPECT_TRUE(hwcap_features->Equals(instruction_set_features.get()))
+      << "Hwcap features: " << *hwcap_features.get()
+      << "\nFeatures from build: " << *instruction_set_features.get();
+}
+
+TEST_F(InstructionSetTest, FeaturesFromAssembly) {
+  // Take the default set of instruction features from the build.
+  std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+      InstructionSetFeatures::FromCppDefines());
+
+  // Check we get the same instruction set features using assembly tests.
+  std::unique_ptr<const InstructionSetFeatures> assembly_features(
+      InstructionSetFeatures::FromAssembly());
+  EXPECT_TRUE(assembly_features->Equals(instruction_set_features.get()))
+      << "Assembly features: " << *assembly_features.get()
+      << "\nFeatures from build: " << *instruction_set_features.get();
+}
+
 }  // namespace art
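
Outside of gtest, the same comparison can be sketched as a start-up cross-check (the function name and logging policy here are illustrative assumptions, not part of the patch):

  // Sketch: compare the features the build was configured with against the
  // features detected from /proc/cpuinfo, logging on a mismatch.
  static void CrossCheckInstructionSetFeatures() {
    std::unique_ptr<const InstructionSetFeatures> built(
        InstructionSetFeatures::FromCppDefines());
    std::unique_ptr<const InstructionSetFeatures> probed(
        InstructionSetFeatures::FromCpuInfo());
    if (!probed->Equals(built.get())) {
      LOG(WARNING) << "Detected features " << *probed.get()
                   << " do not match build-time features " << *built.get();
    }
  }
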
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 15be6b7..6c6058f 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -25,6 +25,7 @@
 #include "debugger.h"
 #include "dex_file-inl.h"
 #include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc_root-inl.h"
 #include "interpreter/interpreter.h"
 #include "mirror/art_method-inl.h"
@@ -95,21 +96,20 @@
   }
   if (!method->IsResolutionMethod()) {
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-    if (quick_code == GetQuickToInterpreterBridge() ||
-        quick_code == class_linker->GetQuickToInterpreterBridgeTrampoline() ||
-        (quick_code == class_linker->GetQuickResolutionTrampoline() &&
-         Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly()
-         && !method->IsNative() && !method->IsProxyMethod())) {
+    if (class_linker->IsQuickToInterpreterBridge(quick_code) ||
+        (class_linker->IsQuickResolutionStub(quick_code) &&
+         Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly() &&
+         !method->IsNative() && !method->IsProxyMethod())) {
       if (kIsDebugBuild) {
         if (quick_code == GetQuickToInterpreterBridge()) {
           DCHECK(portable_code == GetPortableToInterpreterBridge());
-        } else if (quick_code == class_linker->GetQuickResolutionTrampoline()) {
-          DCHECK(portable_code == class_linker->GetPortableResolutionTrampoline());
+        } else if (class_linker->IsQuickResolutionStub(quick_code)) {
+          DCHECK(class_linker->IsPortableResolutionStub(portable_code));
         }
       }
       DCHECK(!method->IsNative()) << PrettyMethod(method);
       DCHECK(!method->IsProxyMethod()) << PrettyMethod(method);
-      method->SetEntryPointFromInterpreter(art::interpreter::artInterpreterToInterpreterBridge);
+      method->SetEntryPointFromInterpreter(art::artInterpreterToInterpreterBridge);
     } else {
       method->SetEntryPointFromInterpreter(art::artInterpreterToCompiledCodeBridge);
     }
@@ -140,8 +140,8 @@
       new_portable_code = class_linker->GetPortableOatCodeFor(method, &have_portable_code);
       new_quick_code = class_linker->GetQuickOatCodeFor(method);
     } else {
-      new_portable_code = class_linker->GetPortableResolutionTrampoline();
-      new_quick_code = class_linker->GetQuickResolutionTrampoline();
+      new_portable_code = GetPortableResolutionStub();
+      new_quick_code = GetQuickResolutionStub();
     }
   } else {  // !uninstall
     if ((interpreter_stubs_installed_ || forced_interpret_only_ || IsDeoptimized(method)) &&
@@ -159,11 +159,11 @@
         } else {
           new_portable_code = class_linker->GetPortableOatCodeFor(method, &have_portable_code);
           new_quick_code = class_linker->GetQuickOatCodeFor(method);
-          DCHECK(new_quick_code != class_linker->GetQuickToInterpreterBridgeTrampoline());
+          DCHECK(!class_linker->IsQuickToInterpreterBridge(new_quick_code));
         }
       } else {
-        new_portable_code = class_linker->GetPortableResolutionTrampoline();
-        new_quick_code = class_linker->GetQuickResolutionTrampoline();
+        new_portable_code = GetPortableResolutionStub();
+        new_quick_code = GetQuickResolutionStub();
       }
     }
   }
@@ -287,7 +287,7 @@
 
   Instrumentation* instrumentation = reinterpret_cast<Instrumentation*>(arg);
   std::unique_ptr<Context> context(Context::Create());
-  uintptr_t instrumentation_exit_pc = GetQuickInstrumentationExitPc();
+  uintptr_t instrumentation_exit_pc = reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc());
   InstallStackVisitor visitor(thread, context.get(), instrumentation_exit_pc);
   visitor.WalkStack(true);
   CHECK_EQ(visitor.dex_pcs_.size(), thread->GetInstrumentationStack()->size());
@@ -388,7 +388,8 @@
   std::deque<instrumentation::InstrumentationStackFrame>* stack = thread->GetInstrumentationStack();
   if (stack->size() > 0) {
     Instrumentation* instrumentation = reinterpret_cast<Instrumentation*>(arg);
-    uintptr_t instrumentation_exit_pc = GetQuickInstrumentationExitPc();
+    uintptr_t instrumentation_exit_pc =
+        reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc());
     RestoreStackVisitor visitor(thread, instrumentation_exit_pc, instrumentation);
     visitor.WalkStack(true);
     CHECK_EQ(visitor.frames_removed_, stack->size());
@@ -669,11 +670,10 @@
       new_have_portable_code = false;
     } else {
       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-      if (quick_code == class_linker->GetQuickResolutionTrampoline() ||
-          quick_code == class_linker->GetQuickToInterpreterBridgeTrampoline() ||
-          quick_code == GetQuickToInterpreterBridge()) {
-        DCHECK((portable_code == class_linker->GetPortableResolutionTrampoline()) ||
-               (portable_code == GetPortableToInterpreterBridge()));
+      if (class_linker->IsQuickResolutionStub(quick_code) ||
+          class_linker->IsQuickToInterpreterBridge(quick_code)) {
+        DCHECK(class_linker->IsPortableResolutionStub(portable_code) ||
+               class_linker->IsPortableToInterpreterBridge(portable_code));
         new_portable_code = portable_code;
         new_quick_code = quick_code;
         new_have_portable_code = have_portable_code;
@@ -793,9 +793,7 @@
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
     if (method->IsStatic() && !method->IsConstructor() &&
         !method->GetDeclaringClass()->IsInitialized()) {
-      // TODO: we're updating to entrypoints in the image here, we can avoid the trampoline.
-      UpdateEntrypoints(method, class_linker->GetQuickResolutionTrampoline(),
-                        class_linker->GetPortableResolutionTrampoline(), false);
+      UpdateEntrypoints(method, GetQuickResolutionStub(), GetPortableResolutionStub(), false);
     } else {
       bool have_portable_code = false;
       const void* quick_code = class_linker->GetQuickOatCodeFor(method);
@@ -877,9 +875,10 @@
     const void* code = method->GetEntryPointFromQuickCompiledCode();
     DCHECK(code != nullptr);
     ClassLinker* class_linker = runtime->GetClassLinker();
-    if (LIKELY(code != class_linker->GetQuickResolutionTrampoline()) &&
-        LIKELY(code != class_linker->GetQuickToInterpreterBridgeTrampoline()) &&
-        LIKELY(code != GetQuickToInterpreterBridge())) {
+    if (LIKELY(!class_linker->IsQuickResolutionStub(code) &&
+               !class_linker->IsQuickToInterpreterBridge(code))) {
       return code;
     }
   }
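
The IsQuickResolutionStub / IsQuickToInterpreterBridge / IsPortable* checks used in this file are new ClassLinker predicates whose definitions fall outside these hunks. A rough sketch of the expected shape (member names such as quick_resolution_trampoline_ are assumptions for illustration, not taken from the patch):

  // Sketch: the class linker answers "is this code one of the resolution
  // stubs?" rather than handing back a single trampoline pointer for callers
  // to compare against.
  bool ClassLinker::IsQuickResolutionStub(const void* entry_point) const {
    return entry_point == GetQuickResolutionStub() ||
           entry_point == quick_resolution_trampoline_;
  }
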
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 8fb1712..dfb03cd 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -507,8 +507,9 @@
   ret_val->SetJ(value.GetJ());
 }
 
-JValue EnterInterpreterFromStub(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
-                                ShadowFrame& shadow_frame) {
+JValue EnterInterpreterFromEntryPoint(Thread* self, MethodHelper* mh,
+                                      const DexFile::CodeItem* code_item,
+                                      ShadowFrame* shadow_frame) {
   DCHECK_EQ(self, Thread::Current());
   bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
   if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
@@ -516,10 +517,10 @@
     return JValue();
   }
 
-  return Execute(self, mh, code_item, shadow_frame, JValue());
+  return Execute(self, *mh, code_item, *shadow_frame, JValue());
 }
 
-extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper* mh,
                                                   const DexFile::CodeItem* code_item,
                                                   ShadowFrame* shadow_frame, JValue* result) {
   bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
@@ -529,10 +530,10 @@
   }
 
   self->PushShadowFrame(shadow_frame);
-  DCHECK_EQ(shadow_frame->GetMethod(), mh.Get());
+  DCHECK_EQ(shadow_frame->GetMethod(), mh->Get());
   // Ensure static methods are initialized.
-  if (mh.Get()->IsStatic()) {
-    mirror::Class* declaring_class = mh.Get()->GetDeclaringClass();
+  if (mh->Get()->IsStatic()) {
+    mirror::Class* declaring_class = mh->Get()->GetDeclaringClass();
     if (UNLIKELY(!declaring_class->IsInitialized())) {
       StackHandleScope<1> hs(self);
       HandleWrapper<Class> h_declaring_class(hs.NewHandleWrapper(&declaring_class));
@@ -546,15 +547,15 @@
     }
   }
 
-  if (LIKELY(!mh.Get()->IsNative())) {
-    result->SetJ(Execute(self, mh, code_item, *shadow_frame, JValue()).GetJ());
+  if (LIKELY(!mh->Get()->IsNative())) {
+    result->SetJ(Execute(self, *mh, code_item, *shadow_frame, JValue()).GetJ());
   } else {
     // We don't expect to be asked to interpret native code (which is entered via a JNI compiler
     // generated stub) except during testing and image writing.
     CHECK(!Runtime::Current()->IsStarted());
-    Object* receiver = mh.Get()->IsStatic() ? nullptr : shadow_frame->GetVRegReference(0);
-    uint32_t* args = shadow_frame->GetVRegArgs(mh.Get()->IsStatic() ? 0 : 1);
-    UnstartedRuntimeJni(self, mh.Get(), receiver, args, result);
+    Object* receiver = mh->Get()->IsStatic() ? nullptr : shadow_frame->GetVRegReference(0);
+    uint32_t* args = shadow_frame->GetVRegArgs(mh->Get()->IsStatic() ? 0 : 1);
+    UnstartedRuntimeJni(self, mh->Get(), receiver, args, result);
   }
 
   self->PopShadowFrame();
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 0750eb5..d327a71 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -42,19 +42,20 @@
                                            JValue* ret_val)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-extern JValue EnterInterpreterFromStub(Thread* self, MethodHelper& mh,
-                                       const DexFile::CodeItem* code_item,
-                                       ShadowFrame& shadow_frame)
+extern JValue EnterInterpreterFromEntryPoint(Thread* self, MethodHelper* mh,
+                                             const DexFile::CodeItem* code_item,
+                                             ShadowFrame* shadow_frame)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
+
+}  // namespace interpreter
+
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper* mh,
                                                   const DexFile::CodeItem* code_item,
                                                   ShadowFrame* shadow_frame, JValue* result)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-}  // namespace interpreter
-
-extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
+extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper* mh,
                                                    const DexFile::CodeItem* code_item,
                                                    ShadowFrame* shadow_frame, JValue* result)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 52583ae..3ccdd03 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -506,7 +506,7 @@
   exit(0);  // Unreachable, keep GCC happy.
 }
 
-static void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
+static void UnstartedRuntimeInvoke(Thread* self, MethodHelper* mh,
                                    const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame,
                                    JValue* result, size_t arg_offset)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -666,9 +666,9 @@
         mh.Get()->GetEntryPointFromInterpreter() == artInterpreterToCompiledCodeBridge) {
       LOG(FATAL) << "Attempt to call compiled code when -Xint: " << PrettyMethod(mh.Get());
     }
-    (mh.Get()->GetEntryPointFromInterpreter())(self, mh, code_item, new_shadow_frame, result);
+    (mh.Get()->GetEntryPointFromInterpreter())(self, &mh, code_item, new_shadow_frame, result);
   } else {
-    UnstartedRuntimeInvoke(self, mh, code_item, new_shadow_frame, result, first_dest_reg);
+    UnstartedRuntimeInvoke(self, &mh, code_item, new_shadow_frame, result, first_dest_reg);
   }
   return !self->IsExceptionPending();
 }
@@ -809,7 +809,7 @@
   result->SetL(found);
 }
 
-static void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
+static void UnstartedRuntimeInvoke(Thread* self, MethodHelper* mh,
                                    const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame,
                                    JValue* result, size_t arg_offset) {
   // In a runtime that's not started we intercept certain methods to avoid complicated dependency
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 16a774f..7fdc18e 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -329,14 +329,14 @@
   expandBufAddUtf8String(pReply, "/");
 
   std::vector<std::string> class_path;
-  Split(Runtime::Current()->GetClassPathString(), ':', class_path);
+  Split(Runtime::Current()->GetClassPathString(), ':', &class_path);
   expandBufAdd4BE(pReply, class_path.size());
   for (size_t i = 0; i < class_path.size(); ++i) {
     expandBufAddUtf8String(pReply, class_path[i]);
   }
 
   std::vector<std::string> boot_class_path;
-  Split(Runtime::Current()->GetBootClassPathString(), ':', boot_class_path);
+  Split(Runtime::Current()->GetBootClassPathString(), ':', &boot_class_path);
   expandBufAdd4BE(pReply, boot_class_path.size());
   for (size_t i = 0; i < boot_class_path.size(); ++i) {
     expandBufAddUtf8String(pReply, boot_class_path[i]);
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index dea3014..e098e11 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2144,7 +2144,7 @@
 
       VLOG(jni) << "[Registering JNI native method " << PrettyMethod(m) << "]";
 
-      m->RegisterNative(soa.Self(), fnPtr, is_fast);
+      m->RegisterNative(fnPtr, is_fast);
     }
     return JNI_OK;
   }
@@ -2160,14 +2160,14 @@
     for (size_t i = 0; i < c->NumDirectMethods(); ++i) {
       mirror::ArtMethod* m = c->GetDirectMethod(i);
       if (m->IsNative()) {
-        m->UnregisterNative(soa.Self());
+        m->UnregisterNative();
         unregistered_count++;
       }
     }
     for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
       mirror::ArtMethod* m = c->GetVirtualMethod(i);
       if (m->IsNative()) {
-        m->UnregisterNative(soa.Self());
+        m->UnregisterNative();
         unregistered_count++;
       }
     }
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 1a65d99..664a412 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -24,7 +24,6 @@
 #include "class_linker.h"
 #include "dex_cache.h"
 #include "dex_file.h"
-#include "entrypoints/entrypoint_utils.h"
 #include "method_helper.h"
 #include "object-inl.h"
 #include "object_array.h"
@@ -176,32 +175,6 @@
   }
 }
 
-inline void ArtMethod::AssertPcIsWithinQuickCode(uintptr_t pc) {
-  if (!kIsDebugBuild) {
-    return;
-  }
-  if (IsNative() || IsRuntimeMethod() || IsProxyMethod()) {
-    return;
-  }
-  if (pc == GetQuickInstrumentationExitPc()) {
-    return;
-  }
-  const void* code = GetEntryPointFromQuickCompiledCode();
-  if (code == GetQuickToInterpreterBridge() || code == GetQuickInstrumentationEntryPoint()) {
-    return;
-  }
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  if (code == class_linker->GetQuickResolutionTrampoline() ||
-      code == class_linker->GetQuickToInterpreterBridgeTrampoline()) {
-    return;
-  }
-  DCHECK(IsWithinQuickCode(pc))
-      << PrettyMethod(this)
-      << " pc=" << std::hex << pc
-      << " code=" << code
-      << " size=" << GetCodeSize();
-}
-
 inline uint32_t ArtMethod::GetQuickOatCodeOffset() {
   DCHECK(!Runtime::Current()->IsStarted());
   return PointerToLowMemUInt32(GetEntryPointFromQuickCompiledCode());
@@ -222,27 +195,6 @@
   SetEntryPointFromPortableCompiledCode(reinterpret_cast<void*>(code_offset));
 }
 
-inline const void* ArtMethod::GetQuickOatEntryPoint() {
-  if (IsPortableCompiled() || IsAbstract() || IsRuntimeMethod() || IsProxyMethod()) {
-    return nullptr;
-  }
-  Runtime* runtime = Runtime::Current();
-  const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this);
-  // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
-  // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
-  // for non-native methods.
-  DCHECK(entry_point != runtime->GetClassLinker()->GetQuickToInterpreterBridgeTrampoline());
-  if (UNLIKELY(entry_point == GetQuickToInterpreterBridge()) ||
-      UNLIKELY(entry_point == runtime->GetClassLinker()->GetQuickGenericJniTrampoline())) {
-    return nullptr;
-  }
-  return entry_point;
-}
-
-inline const void* ArtMethod::GetQuickOatCodePointer() {
-  return EntryPointToCodePointer(GetQuickOatEntryPoint());
-}
-
 inline const uint8_t* ArtMethod::GetMappingTable() {
   const void* code_pointer = GetQuickOatCodePointer();
   if (code_pointer == nullptr) {
@@ -341,69 +293,17 @@
   return result;
 }
 
-inline uintptr_t ArtMethod::NativePcOffset(const uintptr_t pc) {
+inline uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc) {
   const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this);
   return pc - reinterpret_cast<uintptr_t>(code);
 }
 
-inline uintptr_t ArtMethod::NativePcOffset(const uintptr_t pc, const void* quick_entry_point) {
-  DCHECK(quick_entry_point != GetQuickToInterpreterBridge());
-  DCHECK(quick_entry_point == Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this));
-  return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
-}
-
 template<VerifyObjectFlags kVerifyFlags>
 inline void ArtMethod::SetNativeMethod(const void* native_method) {
   SetFieldPtr<false, true, kVerifyFlags>(
       OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), native_method);
 }
 
-inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
-  if (UNLIKELY(IsPortableCompiled())) {
-    // Portable compiled dex bytecode or jni stub.
-    return QuickMethodFrameInfo(kStackAlignment, 0u, 0u);
-  }
-  Runtime* runtime = Runtime::Current();
-  // For Proxy method we exclude direct method (there is only one direct method - constructor).
-  // Direct method is cloned from original java.lang.reflect.Proxy class together with code
-  // and as a result it is executed as usual quick compiled method without any stubs.
-  // So the frame info should be returned as it is a quick method not a stub.
-  if (UNLIKELY(IsAbstract()) || UNLIKELY(IsProxyMethod() && !IsDirect())) {
-    return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
-  }
-  if (UNLIKELY(IsRuntimeMethod())) {
-    return runtime->GetRuntimeMethodFrameInfo(this);
-  }
-
-  const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this);
-  // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
-  // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
-  // for non-native methods. And we really shouldn't see a failure for non-native methods here.
-  DCHECK(entry_point != runtime->GetClassLinker()->GetQuickToInterpreterBridgeTrampoline());
-  CHECK(entry_point != GetQuickToInterpreterBridge());
-
-  if (UNLIKELY(entry_point == runtime->GetClassLinker()->GetQuickGenericJniTrampoline())) {
-    // Generic JNI frame.
-    DCHECK(IsNative());
-    StackHandleScope<1> hs(Thread::Current());
-    uint32_t handle_refs =
-        MethodHelper(hs.NewHandle(this)).GetNumberOfReferenceArgsWithoutReceiver() + 1;
-    size_t scope_size = HandleScope::SizeOf(handle_refs);
-    QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
-
-    // Callee saves + handle scope + method ref + alignment
-    size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() + scope_size
-                                - sizeof(void*)  // callee-save frame stores a whole method pointer
-                                + sizeof(StackReference<mirror::ArtMethod>),
-                                kStackAlignment);
-
-    return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
-  }
-
-  const void* code_pointer = EntryPointToCodePointer(entry_point);
-  return GetQuickFrameInfo(code_pointer);
-}
-
 inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo(const void* code_pointer) {
   DCHECK(code_pointer != nullptr);
   DCHECK_EQ(code_pointer, GetQuickOatCodePointer());
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 787c767..5c72e55 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -23,6 +23,8 @@
 #include "class-inl.h"
 #include "dex_file-inl.h"
 #include "dex_instruction.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/accounting/card_table-inl.h"
 #include "interpreter/interpreter.h"
 #include "jni_internal.h"
@@ -203,7 +205,7 @@
   return DexFile::kDexNoIndex;
 }
 
-uintptr_t ArtMethod::ToNativePc(const uint32_t dex_pc) {
+uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc) {
   const void* entry_point = GetQuickOatEntryPoint();
   MappingTable table(
       entry_point != nullptr ? GetMappingTable(EntryPointToCodePointer(entry_point)) : nullptr);
@@ -281,6 +283,36 @@
   return found_dex_pc;
 }
 
+void ArtMethod::AssertPcIsWithinQuickCode(uintptr_t pc) {
+  if (IsNative() || IsRuntimeMethod() || IsProxyMethod()) {
+    return;
+  }
+  if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
+    return;
+  }
+  const void* code = GetEntryPointFromQuickCompiledCode();
+  if (code == GetQuickInstrumentationEntryPoint()) {
+    return;
+  }
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  if (class_linker->IsQuickToInterpreterBridge(code) ||
+      class_linker->IsQuickResolutionStub(code)) {
+    return;
+  }
+  /*
+   * During a stack walk, a return PC may point past the end of the code
+   * in the case that the last instruction is a call that isn't expected to
+   * return.  Thus, we check <= code + GetCodeSize().
+   *
+   * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
+   */
+  CHECK(PcIsWithinQuickCode(pc))
+      << PrettyMethod(this)
+      << " pc=" << std::hex << pc
+      << " code=" << code
+      << " size=" << GetCodeSize();
+}
+
 bool ArtMethod::IsEntrypointInterpreter() {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   const void* oat_quick_code = class_linker->GetOatMethodQuickCodeFor(this);
@@ -294,6 +326,31 @@
   }
 }
 
+const void* ArtMethod::GetQuickOatEntryPoint() {
+  if (IsPortableCompiled() || IsAbstract() || IsRuntimeMethod() || IsProxyMethod()) {
+    return nullptr;
+  }
+  Runtime* runtime = Runtime::Current();
+  ClassLinker* class_linker = runtime->GetClassLinker();
+  const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this);
+  // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
+  // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
+  // for non-native methods.
+  if (class_linker->IsQuickToInterpreterBridge(code) ||
+      class_linker->IsQuickGenericJniStub(code)) {
+    return nullptr;
+  }
+  return code;
+}
+
+#ifndef NDEBUG
+uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point) {
+  CHECK_NE(quick_entry_point, GetQuickToInterpreterBridge());
+  CHECK_EQ(quick_entry_point, Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this));
+  return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
+}
+#endif
+
 void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
                        const char* shorty) {
   if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
@@ -379,8 +436,53 @@
   self->PopManagedStackFragment(fragment);
 }
 
-void ArtMethod::RegisterNative(Thread* self, const void* native_method, bool is_fast) {
-  DCHECK(Thread::Current() == self);
+QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
+  if (UNLIKELY(IsPortableCompiled())) {
+    // Portable compiled dex bytecode or jni stub.
+    return QuickMethodFrameInfo(kStackAlignment, 0u, 0u);
+  }
+  Runtime* runtime = Runtime::Current();
+  // For Proxy method we exclude direct method (there is only one direct method - constructor).
+  // Direct method is cloned from original java.lang.reflect.Proxy class together with code
+  // and as a result it is executed as usual quick compiled method without any stubs.
+  // So the frame info should be returned as it is a quick method not a stub.
+  if (UNLIKELY(IsAbstract()) || UNLIKELY(IsProxyMethod() && !IsDirect())) {
+    return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+  }
+  if (UNLIKELY(IsRuntimeMethod())) {
+    return runtime->GetRuntimeMethodFrameInfo(this);
+  }
+
+  const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this);
+  ClassLinker* class_linker = runtime->GetClassLinker();
+  // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native methods
+  // (indicating generic JNI), or the quick-to-interpreter-bridge (but not the trampoline) for
+  // non-native methods. We really shouldn't see a failure for non-native methods here.
+  DCHECK(!class_linker->IsQuickToInterpreterBridge(entry_point));
+
+  if (class_linker->IsQuickGenericJniStub(entry_point)) {
+    // Generic JNI frame.
+    DCHECK(IsNative());
+    StackHandleScope<1> hs(Thread::Current());
+    uint32_t handle_refs =
+        MethodHelper(hs.NewHandle(this)).GetNumberOfReferenceArgsWithoutReceiver() + 1;
+    size_t scope_size = HandleScope::SizeOf(handle_refs);
+    QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+
+    // Callee saves + handle scope + method ref + alignment
+    size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() + scope_size
+                                - sizeof(void*)  // callee-save frame stores a whole method pointer
+                                + sizeof(StackReference<mirror::ArtMethod>),
+                                kStackAlignment);
+
+    return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
+  }
+
+  const void* code_pointer = EntryPointToCodePointer(entry_point);
+  return GetQuickFrameInfo(code_pointer);
+}
+
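For readers following the generic JNI frame-size arithmetic above, here is a self-contained illustration of the same RoundUp computation. All constants are hypothetical (ART's real values depend on the target ISA and on HandleScope::SizeOf()); only the shape of the formula is taken from the code above.

#include <cstddef>
#include <cstdint>

// Round x up to the next multiple of n (n must be a power of two), like ART's RoundUp.
constexpr size_t RoundUpTo(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

// Hypothetical numbers: a 112-byte callee-save frame, 3 reference arguments plus the
// receiver in the handle scope, 8-byte pointers and 16-byte stack alignment.
constexpr size_t kCalleeSaveFrameSize = 112;
constexpr size_t kHandleScopeSize = 8 /* header */ + 4 * 8 /* handles */;
constexpr size_t kPointerSize = 8;
constexpr size_t kStackAlign = 16;
constexpr size_t kMethodRefSize = 4;  // assuming a 32-bit StackReference<ArtMethod>

// 112 + 40 - 8 + 4 = 148, rounded up to 160 bytes.
constexpr size_t kGenericJniFrameSize =
    RoundUpTo(kCalleeSaveFrameSize + kHandleScopeSize - kPointerSize + kMethodRefSize,
              kStackAlign);
static_assert(kGenericJniFrameSize == 160, "example arithmetic");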
+void ArtMethod::RegisterNative(const void* native_method, bool is_fast) {
   CHECK(IsNative()) << PrettyMethod(this);
   CHECK(!IsFastNative()) << PrettyMethod(this);
   CHECK(native_method != NULL) << PrettyMethod(this);
@@ -390,10 +492,10 @@
   SetNativeMethod(native_method);
 }
 
-void ArtMethod::UnregisterNative(Thread* self) {
+void ArtMethod::UnregisterNative() {
   CHECK(IsNative() && !IsFastNative()) << PrettyMethod(this);
   // restore stub to lookup native pointer via dlsym
-  RegisterNative(self, GetJniDlsymLookupStub(), false);
+  RegisterNative(GetJniDlsymLookupStub(), false);
 }
 
 }  // namespace mirror
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 939d856..1dbfe5d 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -39,7 +39,7 @@
 
 namespace mirror {
 
-typedef void (EntryPointFromInterpreter)(Thread* self, MethodHelper& mh,
+typedef void (EntryPointFromInterpreter)(Thread* self, MethodHelper* mh,
     const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame, JValue* result);
 
 // C++ mirror of java.lang.reflect.ArtMethod.
@@ -302,7 +302,10 @@
 
   uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // Check whether the given PC is within the quick compiled code associated with this method's
+  // quick entrypoint. This check isn't robust against instrumentation and the like, and is only
+  // used for debugging purposes.
+  bool PcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     uintptr_t code = reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode());
     if (code == 0) {
       return pc == 0;
@@ -329,16 +332,19 @@
   void SetQuickOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void SetPortableOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static const void* EntryPointToCodePointer(const void* entry_point) ALWAYS_INLINE {
+  ALWAYS_INLINE static const void* EntryPointToCodePointer(const void* entry_point) {
     uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
     code &= ~0x1;  // TODO: Make this Thumb2 specific.
     return reinterpret_cast<const void*>(code);
   }
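The code &= ~0x1 above strips the Thumb interworking bit; a tiny sketch of that round trip (pure illustration, the addresses are made up):

#include <cassert>
#include <cstdint>

int main() {
  // On ARM, a Thumb entrypoint has bit 0 set so that a branch to it switches the core to Thumb state.
  uintptr_t entry_point = 0x70001235;  // odd address: Thumb entrypoint
  uintptr_t code_pointer = entry_point & ~static_cast<uintptr_t>(0x1);  // actual start of the code
  assert(code_pointer == 0x70001234);
  (void)code_pointer;  // avoid an unused-variable warning when NDEBUG disables assert
  return 0;
}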
 
-  // Actual entry point pointer to compiled oat code or nullptr.
+  // Actual entry point pointer to compiled oat code, or nullptr if the method has no compiled code.
   const void* GetQuickOatEntryPoint() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Actual pointer to compiled oat code or nullptr.
-  const void* GetQuickOatCodePointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const void* GetQuickOatCodePointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return EntryPointToCodePointer(GetQuickOatEntryPoint());
+  }
 
   // Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
   const uint8_t* GetMappingTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -378,24 +384,25 @@
   QuickMethodFrameInfo GetQuickFrameInfo(const void* code_pointer)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  size_t GetReturnPcOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetReturnPcOffsetInBytes(GetFrameSizeInBytes());
+  FrameOffset GetReturnPcOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetReturnPcOffset(GetFrameSizeInBytes());
   }
 
-  size_t GetReturnPcOffsetInBytes(uint32_t frame_size_in_bytes)
+  FrameOffset GetReturnPcOffset(uint32_t frame_size_in_bytes)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK_EQ(frame_size_in_bytes, GetFrameSizeInBytes());
-    return frame_size_in_bytes - sizeof(void*);
+    return FrameOffset(frame_size_in_bytes - sizeof(void*));
   }
 
-  size_t GetHandleScopeOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return sizeof(void*);
+  FrameOffset GetHandleScopeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK_LT(sizeof(void*), GetFrameSizeInBytes());
+    return FrameOffset(sizeof(void*));
   }
 
-  void RegisterNative(Thread* self, const void* native_method, bool is_fast)
+  void RegisterNative(const void* native_method, bool is_fast)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void UnregisterNative(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void UnregisterNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static MemberOffset NativeMethodOffset() {
     return OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_);
@@ -423,16 +430,23 @@
 
   bool IsImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  uintptr_t NativePcOffset(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  uintptr_t NativePcOffset(const uintptr_t pc, const void* quick_entry_point)
+  uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+#ifdef NDEBUG
+  uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
+  }
+#else
+  uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+#endif
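The NDEBUG split above keeps the release build's offset computation inline while debug builds go through a checked definition (the one added to art_method.cc in this change). A generic sketch of the pattern with hypothetical names; in real code the debug body would live in the .cc file:

#include <cassert>
#include <cstdint>

class FakeMethod {
 public:
  explicit FakeMethod(uintptr_t entry_point) : entry_point_(entry_point) {}

#ifdef NDEBUG
  // Release builds: a trivial inline subtraction.
  uintptr_t PcOffset(uintptr_t pc) const { return pc - entry_point_; }
#else
  // Debug builds: same result, but validate the input PC first.
  uintptr_t PcOffset(uintptr_t pc) const {
    assert(pc >= entry_point_);
    return pc - entry_point_;
  }
#endif

 private:
  uintptr_t entry_point_;
};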
 
   // Converts a native PC to a dex PC.
   uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Converts a dex PC to a native PC.
-  uintptr_t ToNativePc(const uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uintptr_t ToNativeQuickPc(const uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Find the catch block for the given exception type and dex_pc. When a catch block is found,
   // indicates whether the found catch block is responsible for clearing the exception or whether
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 1320ab7..64408a6 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -17,8 +17,6 @@
 #ifndef ART_RUNTIME_MIRROR_STRING_H_
 #define ART_RUNTIME_MIRROR_STRING_H_
 
-#include <gtest/gtest.h>
-
 #include "gc_root.h"
 #include "object.h"
 #include "object_callbacks.h"
@@ -163,7 +161,8 @@
   static GcRoot<Class> java_lang_String_;
 
   friend struct art::StringOffsets;  // for verifying offset information
-  FRIEND_TEST(ObjectTest, StringLength);  // for SetOffset and SetCount
+  ART_FRIEND_TEST(ObjectTest, StringLength);  // for SetOffset and SetCount
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(String);
 };
 
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 6810d73..a237bf6 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -39,7 +39,7 @@
 }
 
 OatHeader* OatHeader::Create(InstructionSet instruction_set,
-                             const InstructionSetFeatures& instruction_set_features,
+                             const InstructionSetFeatures* instruction_set_features,
                              const std::vector<const DexFile*>* dex_files,
                              uint32_t image_file_location_oat_checksum,
                              uint32_t image_file_location_oat_data_begin,
@@ -60,7 +60,7 @@
 }
 
 OatHeader::OatHeader(InstructionSet instruction_set,
-                     const InstructionSetFeatures& instruction_set_features,
+                     const InstructionSetFeatures* instruction_set_features,
                      const std::vector<const DexFile*>* dex_files,
                      uint32_t image_file_location_oat_checksum,
                      uint32_t image_file_location_oat_data_begin,
@@ -76,8 +76,8 @@
   instruction_set_ = instruction_set;
   UpdateChecksum(&instruction_set_, sizeof(instruction_set_));
 
-  instruction_set_features_ = instruction_set_features;
-  UpdateChecksum(&instruction_set_features_, sizeof(instruction_set_features_));
+  instruction_set_features_bitmap_ = instruction_set_features->AsBitmap();
+  UpdateChecksum(&instruction_set_features_bitmap_, sizeof(instruction_set_features_bitmap_));
 
   dex_file_count_ = dex_files->size();
   UpdateChecksum(&dex_file_count_, sizeof(dex_file_count_));
@@ -149,9 +149,9 @@
   return instruction_set_;
 }
 
-const InstructionSetFeatures& OatHeader::GetInstructionSetFeatures() const {
+uint32_t OatHeader::GetInstructionSetFeaturesBitmap() const {
   CHECK(IsValid());
-  return instruction_set_features_;
+  return instruction_set_features_bitmap_;
 }
 
 uint32_t OatHeader::GetExecutableOffset() const {
diff --git a/runtime/oat.h b/runtime/oat.h
index 6a32e3e..92b98b1 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -37,7 +37,7 @@
   static constexpr const char* kDex2OatHostKey = "dex2oat-host";
 
   static OatHeader* Create(InstructionSet instruction_set,
-                           const InstructionSetFeatures& instruction_set_features,
+                           const InstructionSetFeatures* instruction_set_features,
                            const std::vector<const DexFile*>* dex_files,
                            uint32_t image_file_location_oat_checksum,
                            uint32_t image_file_location_oat_data_begin,
@@ -93,7 +93,7 @@
   void SetImagePatchDelta(int32_t off);
 
   InstructionSet GetInstructionSet() const;
-  const InstructionSetFeatures& GetInstructionSetFeatures() const;
+  uint32_t GetInstructionSetFeaturesBitmap() const;
   uint32_t GetImageFileLocationOatChecksum() const;
   uint32_t GetImageFileLocationOatDataBegin() const;
 
@@ -106,7 +106,7 @@
 
  private:
   OatHeader(InstructionSet instruction_set,
-            const InstructionSetFeatures& instruction_set_features,
+            const InstructionSetFeatures* instruction_set_features,
             const std::vector<const DexFile*>* dex_files,
             uint32_t image_file_location_oat_checksum,
             uint32_t image_file_location_oat_data_begin,
@@ -119,7 +119,7 @@
   uint32_t adler32_checksum_;
 
   InstructionSet instruction_set_;
-  InstructionSetFeatures instruction_set_features_;
+  uint32_t instruction_set_features_bitmap_;
   uint32_t dex_file_count_;
   uint32_t executable_offset_;
   uint32_t interpreter_to_interpreter_bridge_offset_;
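The header now records only a 32-bit feature bitmap (obtained via AsBitmap()) instead of embedding an InstructionSetFeatures value. A hedged, self-contained sketch of how such a bitmap can be checked at load time; the feature names and the FakeOatHeader type are hypothetical, only the bitmap-field idea comes from the change above:

#include <cstdint>
#include <iostream>

// Hypothetical feature bits; real ART encodes ISA-specific features (e.g. div or LPAE on ARM).
enum FeatureBits : uint32_t {
  kHasDiv  = 1u << 0,
  kHasLpae = 1u << 1,
};

struct FakeOatHeader {
  uint32_t instruction_set_features_bitmap_;
};

int main() {
  // Compiler side: fold the feature description down to a bitmap and store it in the header.
  FakeOatHeader header;
  header.instruction_set_features_bitmap_ = kHasDiv | kHasLpae;

  // Loader side: derive the runtime's own bitmap and reject oat files that need more.
  uint32_t runtime_features = kHasDiv;  // e.g. a CPU without LPAE
  uint32_t missing = header.instruction_set_features_bitmap_ & ~runtime_features;
  if (missing != 0u) {
    std::cout << "oat file requires unsupported features: 0x" << std::hex << missing << "\n";
  }
  return 0;
}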
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index d820026..dcca9d3 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -205,7 +205,7 @@
 
 bool ParsedOptions::ParseXGcOption(const std::string& option) {
   std::vector<std::string> gc_options;
-  Split(option.substr(strlen("-Xgc:")), ',', gc_options);
+  Split(option.substr(strlen("-Xgc:")), ',', &gc_options);
   for (const std::string& gc_option : gc_options) {
     gc::CollectorType collector_type = ParseCollectorType(gc_option);
     if (collector_type != gc::kCollectorTypeNone) {
@@ -501,7 +501,7 @@
       is_explicit_gc_disabled_ = true;
     } else if (StartsWith(option, "-verbose:")) {
       std::vector<std::string> verbose_options;
-      Split(option.substr(strlen("-verbose:")), ',', verbose_options);
+      Split(option.substr(strlen("-verbose:")), ',', &verbose_options);
       for (size_t i = 0; i < verbose_options.size(); ++i) {
         if (verbose_options[i] == "class") {
           gLogVerbosity.class_linker = true;
@@ -536,7 +536,7 @@
       }
     } else if (StartsWith(option, "-verbose-methods:")) {
       gLogVerbosity.compiler = false;
-      Split(option.substr(strlen("-verbose-methods:")), ',', gVerboseMethods);
+      Split(option.substr(strlen("-verbose-methods:")), ',', &gVerboseMethods);
     } else if (StartsWith(option, "-Xlockprofthreshold:")) {
       if (!ParseUnsignedInteger(option, ':', &lock_profiling_threshold_)) {
         return false;
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index cde4177..1d06d35 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -16,9 +16,11 @@
 
 #include "profiler.h"
 
-#include <fstream>
-#include <sys/uio.h>
 #include <sys/file.h>
+#include <sys/stat.h>
+#include <sys/uio.h>
+
+#include <fstream>
 
 #include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
@@ -742,7 +744,7 @@
     return;
   }
   std::vector<std::string> summary_info;
-  Split(line, '/', summary_info);
+  Split(line, '/', &summary_info);
   if (summary_info.size() != 3) {
     // Bad summary info.  It should be count/nullcount/bootcount
     return;
@@ -757,7 +759,7 @@
       break;
     }
     std::vector<std::string> info;
-    Split(line, '/', info);
+    Split(line, '/', &info);
     if (info.size() != 3 && info.size() != 4) {
       // Malformed.
       break;
@@ -770,10 +772,10 @@
       context_map = new PreviousContextMap();
       std::string context_counts_str = info[3].substr(1, info[3].size() - 2);
       std::vector<std::string> context_count_pairs;
-      Split(context_counts_str, '#', context_count_pairs);
+      Split(context_counts_str, '#', &context_count_pairs);
       for (uint32_t i = 0; i < context_count_pairs.size(); ++i) {
         std::vector<std::string> context_count;
-        Split(context_count_pairs[i], ':', context_count);
+        Split(context_count_pairs[i], ':', &context_count);
         if (context_count.size() == 2) {
          // Handles the situation when the profile file doesn't contain context information.
           uint32_t dexpc = strtoul(context_count[0].c_str(), nullptr, 10);
@@ -819,7 +821,7 @@
     return false;
   }
   std::vector<std::string> summary_info;
-  Split(line, '/', summary_info);
+  Split(line, '/', &summary_info);
   if (summary_info.size() != 3) {
     // Bad summary info.  It should be total/null/boot.
     return false;
@@ -837,7 +839,7 @@
       break;
     }
     std::vector<std::string> info;
-    Split(line, '/', info);
+    Split(line, '/', &info);
     if (info.size() != 3 && info.size() != 4) {
       // Malformed.
       return false;
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 2c158ba..8e57837 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -19,6 +19,7 @@
 #include "arch/context.h"
 #include "dex_instruction.h"
 #include "entrypoints/entrypoint_utils.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
 #include "handle_scope-inl.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/class-inl.h"
@@ -96,7 +97,7 @@
       if (found_dex_pc != DexFile::kDexNoIndex) {
         exception_handler_->SetHandlerMethod(method.Get());
         exception_handler_->SetHandlerDexPc(found_dex_pc);
-        exception_handler_->SetHandlerQuickFramePc(method->ToNativePc(found_dex_pc));
+        exception_handler_->SetHandlerQuickFramePc(method->ToNativeQuickPc(found_dex_pc));
         exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
         return false;  // End stack walk.
       }
@@ -308,7 +309,7 @@
     size_t current_frame_depth = GetFrameDepth();
     if (current_frame_depth < frame_depth_) {
       CHECK(GetMethod() != nullptr);
-      if (UNLIKELY(GetQuickInstrumentationExitPc() == GetReturnPc())) {
+      if (UNLIKELY(reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == GetReturnPc())) {
         ++instrumentation_frames_to_pop_;
       }
       return true;
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 23f8076..b57e48f 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -19,6 +19,7 @@
 #include "class_linker.h"
 #include "common_throws.h"
 #include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
 #include "jni_internal.h"
 #include "method_helper-inl.h"
 #include "mirror/art_field-inl.h"
@@ -528,7 +529,7 @@
 }
 
 void InvokeWithShadowFrame(Thread* self, ShadowFrame* shadow_frame, uint16_t arg_offset,
-                           MethodHelper& mh, JValue* result) {
+                           MethodHelper* mh, JValue* result) {
   // We want to make sure that the stack is not within a small distance from the
   // protected region in case we are calling into a leaf function whose stack
   // check has been elided.
@@ -537,10 +538,10 @@
     return;
   }
 
-  ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
+  ArgArray arg_array(mh->GetShorty(), mh->GetShortyLength());
   arg_array.BuildArgArrayFromFrame(shadow_frame, arg_offset);
   shadow_frame->GetMethod()->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result,
-                                    mh.GetShorty());
+                                    mh->GetShorty());
 }
 
 jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaMethod,
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 23d8e05..f9a7951 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -65,7 +65,7 @@
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 void InvokeWithShadowFrame(Thread* self, ShadowFrame* shadow_frame, uint16_t arg_offset,
-                           MethodHelper& mh, JValue* result)
+                           MethodHelper* mh, JValue* result)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject method, jobject receiver,
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 3bd825b..e366084 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -47,6 +47,7 @@
 #include "class_linker.h"
 #include "debugger.h"
 #include "elf_file.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
 #include "fault_handler.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/heap.h"
@@ -92,10 +93,9 @@
 
 namespace art {
 
+// If a signal isn't handled properly, this enables a handler that attempts to dump the Java stack.
 static constexpr bool kEnableJavaStackTraceHandler = false;
-const char* Runtime::kDefaultInstructionSetFeatures =
-    STRINGIFY(ART_DEFAULT_INSTRUCTION_SET_FEATURES);
-Runtime* Runtime::instance_ = NULL;
+Runtime* Runtime::instance_ = nullptr;
 
 Runtime::Runtime()
     : instruction_set_(kNone),
@@ -803,7 +803,7 @@
     }
   } else if (!IsCompiler() || !image_dex2oat_enabled_) {
     std::vector<std::string> dex_filenames;
-    Split(boot_class_path_string_, ':', dex_filenames);
+    Split(boot_class_path_string_, ':', &dex_filenames);
     std::vector<const DexFile*> boot_class_path;
     OpenDexFiles(dex_filenames, options->image_, boot_class_path);
     class_linker_->InitWithoutImage(boot_class_path);
@@ -1216,8 +1216,8 @@
     method->SetEntryPointFromPortableCompiledCode(nullptr);
     method->SetEntryPointFromQuickCompiledCode(nullptr);
   } else {
-    method->SetEntryPointFromPortableCompiledCode(class_linker->GetPortableImtConflictTrampoline());
-    method->SetEntryPointFromQuickCompiledCode(class_linker->GetQuickImtConflictTrampoline());
+    method->SetEntryPointFromPortableCompiledCode(GetPortableImtConflictStub());
+    method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
   }
   return method.Get();
 }
@@ -1236,8 +1236,8 @@
     method->SetEntryPointFromPortableCompiledCode(nullptr);
     method->SetEntryPointFromQuickCompiledCode(nullptr);
   } else {
-    method->SetEntryPointFromPortableCompiledCode(class_linker->GetPortableResolutionTrampoline());
-    method->SetEntryPointFromQuickCompiledCode(class_linker->GetQuickResolutionTrampoline());
+    method->SetEntryPointFromPortableCompiledCode(GetPortableResolutionStub());
+    method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
   }
   return method.Get();
 }
@@ -1454,9 +1454,10 @@
   instruction_set += GetInstructionSetString(kRuntimeISA);
   argv->push_back(instruction_set);
 
-  std::string features("--instruction-set-features=");
-  features += GetDefaultInstructionSetFeatures();
-  argv->push_back(features);
+  std::unique_ptr<const InstructionSetFeatures> features(InstructionSetFeatures::FromCppDefines());
+  std::string feature_string("--instruction-set-features=");
+  feature_string += features->GetFeatureString();
+  argv->push_back(feature_string);
 }
 
 void Runtime::UpdateProfilerState(int state) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 7bffc33..f3bea17 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -483,10 +483,6 @@
     return target_sdk_version_;
   }
 
-  static const char* GetDefaultInstructionSetFeatures() {
-    return kDefaultInstructionSetFeatures;
-  }
-
  private:
   static void InitPlatformSignalHandlers();
 
@@ -506,8 +502,6 @@
   // A pointer to the active runtime or NULL.
   static Runtime* instance_;
 
-  static const char* kDefaultInstructionSetFeatures;
-
   // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
   static constexpr int kProfileForground = 0;
   static constexpr int kProfileBackgrouud = 1;
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 008941f..b4e85e2 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -18,6 +18,7 @@
 
 #include "arch/context.h"
 #include "base/hex_dump.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object.h"
@@ -119,7 +120,7 @@
   } else if (m->IsNative()) {
     if (cur_quick_frame_ != nullptr) {
       HandleScope* hs = reinterpret_cast<HandleScope*>(
-          reinterpret_cast<char*>(cur_quick_frame_) + m->GetHandleScopeOffsetInBytes());
+          reinterpret_cast<char*>(cur_quick_frame_) + m->GetHandleScopeOffset().SizeValue());
       return hs->GetReference(0);
     } else {
       return cur_shadow_frame_->GetVRegReference(0);
@@ -143,7 +144,7 @@
 
 size_t StackVisitor::GetNativePcOffset() const {
   DCHECK(!IsShadowFrame());
-  return GetMethod()->NativePcOffset(cur_quick_frame_pc_);
+  return GetMethod()->NativeQuickPcOffset(cur_quick_frame_pc_);
 }
 
 bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
@@ -394,14 +395,14 @@
 uintptr_t StackVisitor::GetReturnPc() const {
   uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
   DCHECK(sp != NULL);
-  uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
+  uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
   return *reinterpret_cast<uintptr_t*>(pc_addr);
 }
 
 void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
   uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
   CHECK(sp != NULL);
-  uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
+  uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
   *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
 }
 
@@ -509,7 +510,7 @@
       // const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
       const size_t kMaxExpectedFrameSize = 2 * KB;
       CHECK_LE(frame_size, kMaxExpectedFrameSize);
-      size_t return_pc_offset = method->GetReturnPcOffsetInBytes();
+      size_t return_pc_offset = method->GetReturnPcOffset().SizeValue();
       CHECK_LT(return_pc_offset, frame_size);
     }
   }
@@ -543,13 +544,13 @@
         }
         size_t frame_size = method->GetFrameSizeInBytes();
         // Compute PC for next stack frame from return PC.
-        size_t return_pc_offset = method->GetReturnPcOffsetInBytes(frame_size);
+        size_t return_pc_offset = method->GetReturnPcOffset(frame_size).SizeValue();
         uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
         uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
         if (UNLIKELY(exit_stubs_installed)) {
           // While profiling, the return pc is restored from the side stack, except when walking
           // the stack for an exception where the side stack will be unwound in VisitFrame.
-          if (GetQuickInstrumentationExitPc() == return_pc) {
+          if (reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc) {
             const instrumentation::InstrumentationStackFrame& instrumentation_frame =
                 GetInstrumentationStackFrame(thread_, instrumentation_stack_depth);
             instrumentation_stack_depth++;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index fd37703..efe27ee 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2108,7 +2108,7 @@
       if (m->IsOptimized()) {
         Runtime* runtime = Runtime::Current();
         const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m);
-        uintptr_t native_pc_offset = m->NativePcOffset(GetCurrentQuickFramePc(), entry_point);
+        uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
         StackMap map = m->GetStackMap(native_pc_offset);
         MemoryRegion mask = map.GetStackMask();
         for (size_t i = 0; i < mask.size_in_bits(); ++i) {
@@ -2137,7 +2137,7 @@
         if (num_regs > 0) {
           Runtime* runtime = Runtime::Current();
           const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m);
-          uintptr_t native_pc_offset = m->NativePcOffset(GetCurrentQuickFramePc(), entry_point);
+          uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
           const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
           DCHECK(reg_bitmap != nullptr);
           const void* code_pointer = mirror::ArtMethod::EntryPointToCodePointer(entry_point);
diff --git a/runtime/thread.h b/runtime/thread.h
index b0be841..32ed758 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -144,6 +144,9 @@
   // Reset internal state of child thread after fork.
   void InitAfterFork();
 
+  // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
+  // high cost and so we favor passing self around when possible.
+  // TODO: mark as PURE so the compiler may coalesce and remove?
   static Thread* Current();
 
   // On a runnable thread, check for pending thread suspension request and handle if pending.
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 0688c1a..9c94f6c 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -963,7 +963,7 @@
   return IsValidClassName<kDescriptor, '/'>(s);
 }
 
-void Split(const std::string& s, char separator, std::vector<std::string>& result) {
+void Split(const std::string& s, char separator, std::vector<std::string>* result) {
   const char* p = s.data();
   const char* end = p + s.size();
   while (p != end) {
@@ -974,12 +974,12 @@
       while (++p != end && *p != separator) {
         // Skip to the next occurrence of the separator.
       }
-      result.push_back(std::string(start, p - start));
+      result->push_back(std::string(start, p - start));
     }
   }
 }
 
-std::string Trim(std::string s) {
+std::string Trim(const std::string& s) {
   std::string result;
   unsigned int start_index = 0;
   unsigned int end_index = s.size() - 1;
@@ -1009,7 +1009,7 @@
 }
 
 template <typename StringT>
-std::string Join(std::vector<StringT>& strings, char separator) {
+std::string Join(const std::vector<StringT>& strings, char separator) {
   if (strings.empty()) {
     return "";
   }
@@ -1023,9 +1023,8 @@
 }
 
 // Explicit instantiations.
-template std::string Join<std::string>(std::vector<std::string>& strings, char separator);
-template std::string Join<const char*>(std::vector<const char*>& strings, char separator);
-template std::string Join<char*>(std::vector<char*>& strings, char separator);
+template std::string Join<std::string>(const std::vector<std::string>& strings, char separator);
+template std::string Join<const char*>(const std::vector<const char*>& strings, char separator);
 
 bool StartsWith(const std::string& s, const char* prefix) {
   return s.compare(0, strlen(prefix), prefix) == 0;
@@ -1087,7 +1086,7 @@
   stats = stats.substr(stats.find(')') + 2);
   // Extract the three fields we care about.
   std::vector<std::string> fields;
-  Split(stats, ' ', fields);
+  Split(stats, ' ', &fields);
   *state = fields[0][0];
   *utime = strtoull(fields[11].c_str(), NULL, 10);
   *stime = strtoull(fields[12].c_str(), NULL, 10);
@@ -1104,12 +1103,12 @@
     return "";
   }
   std::vector<std::string> cgroup_lines;
-  Split(cgroup_file, '\n', cgroup_lines);
+  Split(cgroup_file, '\n', &cgroup_lines);
   for (size_t i = 0; i < cgroup_lines.size(); ++i) {
     std::vector<std::string> cgroup_fields;
-    Split(cgroup_lines[i], ':', cgroup_fields);
+    Split(cgroup_lines[i], ':', &cgroup_fields);
     std::vector<std::string> cgroups;
-    Split(cgroup_fields[1], ',', cgroups);
+    Split(cgroup_fields[1], ',', &cgroups);
     for (size_t i = 0; i < cgroups.size(); ++i) {
       if (cgroups[i] == "cpu") {
         return cgroup_fields[2].substr(1);  // Skip the leading slash.
@@ -1154,7 +1153,7 @@
         }
       } else if (current_method != nullptr &&
                  Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
-                 current_method->IsWithinQuickCode(it->pc)) {
+                 current_method->PcIsWithinQuickCode(it->pc)) {
         const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
         os << JniLongName(current_method) << "+"
            << (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
@@ -1189,7 +1188,7 @@
   }
 
   std::vector<std::string> kernel_stack_frames;
-  Split(kernel_stack, '\n', kernel_stack_frames);
+  Split(kernel_stack, '\n', &kernel_stack_frames);
   // We skip the last stack frame because it's always equivalent to "[<ffffffff>] 0xffffffff",
   // which looking at the source appears to be the kernel's way of saying "that's all, folks!".
   kernel_stack_frames.pop_back();
diff --git a/runtime/utils.h b/runtime/utils.h
index 53b49c8..b7daa64 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -380,13 +380,13 @@
 
 // Splits a string using the given separator character into a vector of
 // strings. Empty strings will be omitted.
-void Split(const std::string& s, char separator, std::vector<std::string>& result);
+void Split(const std::string& s, char separator, std::vector<std::string>* result);
 
 // Trims whitespace off both ends of the given string.
-std::string Trim(std::string s);
+std::string Trim(const std::string& s);
 
 // Joins a vector of strings into a single string, using the given separator.
-template <typename StringT> std::string Join(std::vector<StringT>& strings, char separator);
+template <typename StringT> std::string Join(const std::vector<StringT>& strings, char separator);
 
 // Returns the calling thread's tid. (The C libraries don't expose this.)
 pid_t GetTid();
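Since every call site in this change now passes the output vector by pointer, a small self-contained usage example of the new Split signature. The body follows the lines visible in the diff; the elided middle is reconstructed and may differ slightly from the actual source, and main() is illustrative only:

#include <iostream>
#include <string>
#include <vector>

// New-style Split: output parameter passed by pointer, empty substrings are omitted.
void Split(const std::string& s, char separator, std::vector<std::string>* result) {
  const char* p = s.data();
  const char* end = p + s.size();
  while (p != end) {
    if (*p == separator) {
      ++p;
    } else {
      const char* start = p;
      while (++p != end && *p != separator) {
        // Skip to the next occurrence of the separator.
      }
      result->push_back(std::string(start, p - start));
    }
  }
}

int main() {
  std::vector<std::string> dex_filenames;
  Split("core.jar:framework.jar", ':', &dex_filenames);  // previously: Split(..., dex_filenames)
  for (const std::string& name : dex_filenames) {
    std::cout << name << "\n";
  }
  return 0;
}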
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 1b2c3ee..92323da 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -241,62 +241,62 @@
   expected.clear();
 
   actual.clear();
-  Split("", ':', actual);
+  Split("", ':', &actual);
   EXPECT_EQ(expected, actual);
 
   actual.clear();
-  Split(":", ':', actual);
+  Split(":", ':', &actual);
   EXPECT_EQ(expected, actual);
 
   expected.clear();
   expected.push_back("foo");
 
   actual.clear();
-  Split(":foo", ':', actual);
+  Split(":foo", ':', &actual);
   EXPECT_EQ(expected, actual);
 
   actual.clear();
-  Split("foo:", ':', actual);
+  Split("foo:", ':', &actual);
   EXPECT_EQ(expected, actual);
 
   actual.clear();
-  Split(":foo:", ':', actual);
+  Split(":foo:", ':', &actual);
   EXPECT_EQ(expected, actual);
 
   expected.push_back("bar");
 
   actual.clear();
-  Split("foo:bar", ':', actual);
+  Split("foo:bar", ':', &actual);
   EXPECT_EQ(expected, actual);
 
   actual.clear();
-  Split(":foo:bar", ':', actual);
+  Split(":foo:bar", ':', &actual);
   EXPECT_EQ(expected, actual);
 
   actual.clear();
-  Split("foo:bar:", ':', actual);
+  Split("foo:bar:", ':', &actual);
   EXPECT_EQ(expected, actual);
 
   actual.clear();
-  Split(":foo:bar:", ':', actual);
+  Split(":foo:bar:", ':', &actual);
   EXPECT_EQ(expected, actual);
 
   expected.push_back("baz");
 
   actual.clear();
-  Split("foo:bar:baz", ':', actual);
+  Split("foo:bar:baz", ':', &actual);
   EXPECT_EQ(expected, actual);
 
   actual.clear();
-  Split(":foo:bar:baz", ':', actual);
+  Split(":foo:bar:baz", ':', &actual);
   EXPECT_EQ(expected, actual);
 
   actual.clear();
-  Split("foo:bar:baz:", ':', actual);
+  Split("foo:bar:baz:", ':', &actual);
   EXPECT_EQ(expected, actual);
 
   actual.clear();
-  Split(":foo:bar:baz:", ':', actual);
+  Split(":foo:bar:baz:", ':', &actual);
   EXPECT_EQ(expected, actual);
 }
 
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index e914bd9..291b45f 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -22,7 +22,7 @@
 #define CHECK_REGS_CONTAIN_REFS(native_pc_offset, ...) do { \
   int t[] = {__VA_ARGS__}; \
   int t_size = sizeof(t) / sizeof(*t); \
-  CheckReferences(t, t_size, m->NativePcOffset(m->ToNativePc(native_pc_offset))); \
+  CheckReferences(t, t_size, m->NativeQuickPcOffset(m->ToNativeQuickPc(native_pc_offset))); \
 } while (false);
 
 struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index 2d139a6..fd95038 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -69,7 +69,7 @@
     LOCAL_CLANG := $(ART_HOST_CLANG)
     LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
     LOCAL_STATIC_LIBRARIES := libcutils
-    LOCAL_LDLIBS += -ldl -lpthread
+    LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread
     ifeq ($(HOST_OS),linux)
       LOCAL_LDLIBS += -lrt
     endif
diff --git a/test/Android.libnativebridgetest.mk b/test/Android.libnativebridgetest.mk
index dd7255a..5e2493c 100644
--- a/test/Android.libnativebridgetest.mk
+++ b/test/Android.libnativebridgetest.mk
@@ -62,7 +62,7 @@
     LOCAL_CLANG := $(ART_HOST_CLANG)
     LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
     LOCAL_STATIC_LIBRARIES := libcutils
-    LOCAL_LDLIBS += -ldl -lpthread
+    LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread
     ifeq ($(HOST_OS),linux)
       LOCAL_LDLIBS += -lrt
     endif
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 9082b47..e066a38 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -52,7 +52,7 @@
 include $(CLEAR_VARS)
 LOCAL_MODULE_TAGS := tests
 LOCAL_MODULE := art-run-tests
-LOCAL_ADDITIONAL_DEPENDENCIES := $(TEST_ART_RUN_TEST_BUILD_RULES) smali dexmerger
+LOCAL_ADDITIONAL_DEPENDENCIES := $(TEST_ART_RUN_TEST_BUILD_RULES)
 # The build system use this flag to pick up files generated by declare-make-art-run-test.
 LOCAL_PICKUP_FILES := $(art_run_tests_dir)