Merge "Copyright assignment"
diff --git a/Android.mk b/Android.mk
index 18e1c64..7d31382 100644
--- a/Android.mk
+++ b/Android.mk
@@ -105,9 +105,13 @@
 
 
 # ART_HOST_DEPENDENCIES depends on Android.executable.mk above for ART_HOST_EXECUTABLES
-ART_HOST_DEPENDENCIES := $(ART_HOST_EXECUTABLES) $(HOST_OUT_JAVA_LIBRARIES)/core-libart-hostdex.jar \
+ART_HOST_DEPENDENCIES := \
+	$(ART_HOST_EXECUTABLES) \
+	$(HOST_OUT_JAVA_LIBRARIES)/core-libart-hostdex.jar \
 	$(HOST_LIBRARY_PATH)/libjavacore$(ART_HOST_SHLIB_EXTENSION)
-ART_TARGET_DEPENDENCIES := $(ART_TARGET_EXECUTABLES) $(TARGET_OUT_JAVA_LIBRARIES)/core-libart.jar \
+ART_TARGET_DEPENDENCIES := \
+	$(ART_TARGET_EXECUTABLES) \
+	$(TARGET_OUT_JAVA_LIBRARIES)/core-libart.jar \
 	$(TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so
 ifdef TARGET_2ND_ARCH
 ART_TARGET_DEPENDENCIES += $(2ND_TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 4023336..150b404 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -48,6 +48,8 @@
   ART_TARGET_ARCH_64 :=
 endif
 
+ART_HOST_SHLIB_EXTENSION := $(HOST_SHLIB_SUFFIX)
+ART_HOST_SHLIB_EXTENSION ?= .so
 ifeq ($(HOST_PREFER_32_BIT),true)
   ART_PHONY_TEST_HOST_SUFFIX := 32
   2ND_ART_PHONY_TEST_HOST_SUFFIX :=
@@ -56,6 +58,8 @@
   ART_HOST_ARCH := x86
   2ND_ART_HOST_ARCH :=
   2ND_HOST_ARCH :=
+  ART_HOST_LIBRARY_PATH := $(HOST_LIBRARY_PATH)
+  2ND_ART_HOST_LIBRARY_PATH :=
   ART_HOST_OUT_SHARED_LIBRARIES := $(2ND_HOST_OUT_SHARED_LIBRARIES)
   2ND_ART_HOST_OUT_SHARED_LIBRARIES :=
 else
@@ -66,6 +70,8 @@
   ART_HOST_ARCH := x86_64
   2ND_ART_HOST_ARCH := x86
   2ND_HOST_ARCH := x86
+  ART_HOST_LIBRARY_PATH := $(HOST_LIBRARY_PATH)
+  2ND_ART_HOST_LIBRARY_PATH := $(HOST_LIBRARY_PATH)32
   ART_HOST_OUT_SHARED_LIBRARIES := $(HOST_OUT_SHARED_LIBRARIES)
   2ND_ART_HOST_OUT_SHARED_LIBRARIES := $(2ND_HOST_OUT_SHARED_LIBRARIES)
 endif
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 90609ec..5cfdbfd 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -121,10 +121,10 @@
   ART_HOST_CLANG := true
 endif
 
-# Clang on the target: only enabled for ARM64. Target builds use GCC by default.
+# Clang on the target. Target builds use GCC by default.
 ART_TARGET_CLANG :=
 ART_TARGET_CLANG_arm :=
-ART_TARGET_CLANG_arm64 := true
+ART_TARGET_CLANG_arm64 :=
 ART_TARGET_CLANG_mips :=
 ART_TARGET_CLANG_x86 :=
 ART_TARGET_CLANG_x86_64 :=
@@ -139,9 +139,6 @@
 
 ART_CPP_EXTENSION := .cc
 
-ART_HOST_SHLIB_EXTENSION := $(HOST_SHLIB_SUFFIX)
-ART_HOST_SHLIB_EXTENSION ?= .so
-
 ART_C_INCLUDES := \
   external/gtest/include \
   external/valgrind/main/include \
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index d672393..8c0d9f2 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -72,5 +72,4 @@
 
 HOST_CORE_DEX_FILES   := $(foreach jar,$(HOST_CORE_JARS),  $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
 TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
-
 endif # ANDROID_COMMON_PATH_MK
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 1815f50..21a8931 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -99,6 +99,7 @@
     LOCAL_MODULE_PATH := $(3)
     LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT)
     include $(BUILD_JAVA_LIBRARY)
+    $(5) := $$(LOCAL_INSTALLED_MODULE)
   endif
   ifeq ($(ART_BUILD_HOST),true)
     include $(CLEAR_VARS)
@@ -110,8 +111,8 @@
     LOCAL_JAVA_LIBRARIES := $(HOST_CORE_JARS)
     LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION)
     include $(BUILD_HOST_DALVIK_JAVA_LIBRARY)
+    $(5)-host := $$(LOCAL_INSTALLED_MODULE)
   endif
-  $(5) := $$(LOCAL_INSTALLED_MODULE)
 endef
 
 endif # ANDROID_COMMON_TEST_MK
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 9723b89a..69d6c5a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -45,20 +45,17 @@
   $(ART_TARGET_NATIVETEST_OUT),art/build/Android.gtest.mk,ART_GTEST_$(dir)_DEX)))
 
 # Dex file dependencies for each gtest.
-ART_GTEST_class_linker_test_DEPS := $(ART_GTEST_Interfaces_DEX) $(ART_GTEST_MyClass_DEX) \
-  $(ART_GTEST_Nested_DEX) $(ART_GTEST_Statics_DEX) $(ART_GTEST_StaticsFromCode_DEX)
-ART_GTEST_compiler_test_DEPS := $(ART_GTEST_AbstractMethod_DEX)
-ART_GTEST_dex_file_test_DEPS := $(ART_GTEST_GetMethodSignature_DEX)
-ART_GTEST_exception_test_DEPS := $(ART_GTEST_ExceptionHandle_DEX)
-ART_GTEST_jni_compiler_test_DEPS := $(ART_GTEST_MyClassNatives_DEX)
-ART_GTEST_jni_internal_test_DEPS := $(ART_GTEST_AllFields_DEX)
-ART_GTEST_object_test_DEPS := $(ART_GTEST_ProtoCompare_DEX) $(ART_GTEST_ProtoCompare2_DEX) \
-   $(ART_GTEST_StaticsFromCode_DEX) $(ART_GTEST_XandY_DEX)
-ART_GTEST_proxy_test_DEPS := $(ART_GTEST_Interfaces_DEX)
-ART_GTEST_reflection_test_DEPS := $(ART_GTEST_Main_DEX) $(ART_GTEST_NonStaticLeafMethods_DEX) \
-  $(ART_GTEST_StaticLeafMethods_DEX)
-ART_GTEST_stub_test_DEPS := $(ART_GTEST_AllFields_DEX)
-ART_GTEST_transaction_test_DEPS := $(ART_GTEST_Transaction_DEX)
+ART_GTEST_class_linker_test_DEPS := Interfaces MyClass Nested Statics StaticsFromCode
+ART_GTEST_compiler_driver_test_DEPS := AbstractMethod
+ART_GTEST_dex_file_test_DEPS := GetMethodSignature
+ART_GTEST_exception_test_DEPS := ExceptionHandle
+ART_GTEST_jni_compiler_test_DEPS := MyClassNatives
+ART_GTEST_jni_internal_test_DEPS := AllFields StaticLeafMethods
+ART_GTEST_object_test_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY
+ART_GTEST_proxy_test_DEPS := Interfaces
+ART_GTEST_reflection_test_DEPS := Main NonStaticLeafMethods StaticLeafMethods
+ART_GTEST_stub_test_DEPS := AllFields
+ART_GTEST_transaction_test_DEPS := Transaction
 
 # The elf writer test has dependencies on core.oat.
 ART_GTEST_elf_writer_test_DEPS := $(HOST_CORE_OAT_OUT) $(2ND_HOST_CORE_OAT_OUT) \
@@ -194,12 +191,12 @@
 define define-art-gtest-rule-target
   gtest_rule := test-art-target-gtest-$(1)$$($(2)ART_PHONY_TEST_TARGET_SUFFIX)
 
-  # Add the test dependencies to test-art-target-sync, which will be a prerequisit for the test
+  # Add the test dependencies to test-art-target-sync, which will be a prerequisite for the test
   # to ensure files are pushed to the device.
   TEST_ART_TARGET_SYNC_DEPS += \
-    $$(ART_GTEST_$(1)_DEPS) \
+    $(foreach file,$(ART_GTEST_$(1)_DEPS),$(ART_GTEST_$(file)_DEX)) \
     $$(ART_TARGET_NATIVETEST_OUT)/$$(TARGET_$(2)ARCH)/$(1) \
-    $$(TARGET_CORE_DEX_LOCATIONS)
+    $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so
 
 .PHONY: $$(gtest_rule)
 $$(gtest_rule): test-art-target-sync
@@ -227,9 +224,13 @@
 define define-art-gtest-rule-host
   gtest_rule := test-art-host-gtest-$(1)$$($(2)ART_PHONY_TEST_HOST_SUFFIX)
   gtest_exe := $$(HOST_OUT_EXECUTABLES)/$(1)$$($(2)ART_PHONY_TEST_HOST_SUFFIX)
+  # Dependencies for all host gtests.
+  gtest_deps := $$(HOST_CORE_DEX_LOCATIONS) \
+    $$($(2)ART_HOST_LIBRARY_PATH)/libjavacore$$(ART_HOST_SHLIB_EXTENSION)
+
 
 .PHONY: $$(gtest_rule)
-$$(gtest_rule): $$(gtest_exe) $$(ART_GTEST_$(1)_DEPS) $$(HOST_CORE_DEX_LOCATIONS)
+$$(gtest_rule): $$(gtest_exe) $(foreach file,$(ART_GTEST_$(1)_DEPS),$(ART_GTEST_$(file)_DEX-host)) $$(gtest_deps)
 	$(hide) ($$(call ART_TEST_SKIP,$$@) && $$< && $$(call ART_TEST_PASSED,$$@)) \
 	  || $$(call ART_TEST_FAILED,$$@)
 
@@ -238,7 +239,7 @@
   ART_TEST_HOST_GTEST_$(1)_RULES += $$(gtest_rule)
 
 .PHONY: valgrind-$$(gtest_rule)
-valgrind-$$(gtest_rule): $$(gtest_exe) test-art-host-dependencies $$(ART_GTEST_$(1)_DEPS)
+valgrind-$$(gtest_rule): $$(gtest_exe) $(foreach file,$(ART_GTEST_$(1)_DEPS),$(ART_GTEST_$(file)_DEX-host)) $$(gtest_deps)
 	$(hide) $$(call ART_TEST_SKIP,$$@) && \
 	  valgrind --leak-check=full --error-exitcode=1 $$< && $$(call ART_TEST_PASSED,$$@) \
 	    || $$(call ART_TEST_FAILED,$$@)
@@ -251,6 +252,7 @@
   valgrind_gtest_rule :=
   gtest_rule :=
   gtest_exe :=
+  gtest_deps :=
 endef  # define-art-gtest-rule-host
 
 # Define the rules to build and run host and target gtests.
@@ -443,7 +445,7 @@
 ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
 ART_TEST_TARGET_GTEST_RULES :=
 ART_GTEST_class_linker_test_DEPS :=
-ART_GTEST_compiler_test_DEPS :=
+ART_GTEST_compiler_driver_test_DEPS :=
 ART_GTEST_dex_file_test_DEPS :=
 ART_GTEST_exception_test_DEPS :=
 ART_GTEST_jni_compiler_test_DEPS :=
@@ -455,4 +457,4 @@
 ART_GTEST_transaction_test_DEPS :=
 $(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_GTEST_TEST_$(dir)_DEX :=))
 GTEST_DEX_DIRECTORIES :=
-LOCAL_PATH :=
\ No newline at end of file
+LOCAL_PATH :=
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index f06f08e..3f362f2 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -423,11 +423,11 @@
   { kX86Fld64M,   kMem,     IS_LOAD    | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0,  0,    0xDD, 0x00, 0, 0, 0, 0, false }, "Fld64M",   "[!0r,!1d]" },
   { kX86Fstp32M,  kMem,     IS_STORE   | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0,  0,    0xD9, 0x00, 0, 3, 0, 0, false }, "Fstps32M", "[!0r,!1d]" },
   { kX86Fstp64M,  kMem,     IS_STORE   | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0,  0,    0xDD, 0x00, 0, 3, 0, 0, false }, "Fstpd64M", "[!0r,!1d]" },
-  { kX86Fst32M,   kMem,     IS_STORE   | IS_UNARY_OP | REG_USE0,                { 0x0,  0,    0xD9, 0x00, 0, 2, 0, 0, false }, "Fsts32M",  "[!0r,!1d]" },
-  { kX86Fst64M,   kMem,     IS_STORE   | IS_UNARY_OP | REG_USE0,                { 0x0,  0,    0xDD, 0x00, 0, 2, 0, 0, false }, "Fstd64M",  "[!0r,!1d]" },
+  { kX86Fst32M,   kMem,     IS_STORE   | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0,  0,    0xD9, 0x00, 0, 2, 0, 0, false }, "Fsts32M",  "[!0r,!1d]" },
+  { kX86Fst64M,   kMem,     IS_STORE   | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0,  0,    0xDD, 0x00, 0, 2, 0, 0, false }, "Fstd64M",  "[!0r,!1d]" },
   { kX86Fprem,    kNullary, NO_OPERAND | USE_FP_STACK,                          { 0xD9, 0,    0xF8, 0,    0, 0, 0, 0, false }, "Fprem64",  "" },
   { kX86Fucompp,  kNullary, NO_OPERAND | USE_FP_STACK,                          { 0xDA, 0,    0xE9, 0,    0, 0, 0, 0, false }, "Fucompp",  "" },
-  { kX86Fstsw16R, kNullary, NO_OPERAND,                                         { 0x9B, 0xDF, 0xE0, 0,    0, 0, 0, 0, false }, "Fstsw16R", "ax" },
+  { kX86Fstsw16R, kNullary, NO_OPERAND | USE_FP_STACK,                          { 0x9B, 0xDF, 0xE0, 0,    0, 0, 0, 0, false }, "Fstsw16R", "ax" },
 
   EXT_0F_ENCODING_MAP(Mova128,    0x66, 0x6F, REG_DEF0),
   { kX86Mova128MR, kMemReg,   IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0x66, 0, 0x0F, 0x6F, 0, 0, 0, 0, false }, "Mova128MR", "[!0r+!1d],!2r" },
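Note on the assemble_x86.cc hunk above: kX86Fst32M/kX86Fst64M store st(0) without popping, and kX86Fstsw16R reads the FP status word, so all three now advertise USE_FP_STACK like the neighboring x87 entries. A minimal sketch of why the flag matters, with hypothetical flag values and a made-up CanReorder predicate standing in for whatever LIR pass consults these bits:

```cpp
#include <cassert>
#include <cstdint>

// Simplified flag bits in the spirit of the encoding table above; the
// real LIR flag values and the passes that read them are assumptions.
constexpr uint64_t IS_STORE     = 1u << 0;
constexpr uint64_t USE_FP_STACK = 1u << 1;

// Two instructions may swap places only if they do not both touch the
// x87 register stack.
bool CanReorder(uint64_t a, uint64_t b) {
  return !((a & USE_FP_STACK) && (b & USE_FP_STACK));
}

int main() {
  const uint64_t fld32m     = USE_FP_STACK;             // pushes st(0)
  const uint64_t fst32m     = IS_STORE | USE_FP_STACK;  // reads st(0), after the fix
  const uint64_t fst32m_old = IS_STORE;                 // before the fix
  assert(!CanReorder(fld32m, fst32m));     // correctly kept in order
  assert(CanReorder(fld32m, fst32m_old));  // the old flags allowed a reorder
  return 0;
}
```

Before the fix, the store's flags claimed no FP-stack use, so a pass consulting them was free to separate the store from the fld that produced its operand.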
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 350cfb8..fd20a81 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -2688,7 +2688,7 @@
     Mir2Lir::GenIntToLong(rl_dest, rl_src);
     return;
   }
-  rl_src = UpdateLoc(rl_src);
+  rl_src = UpdateLocTyped(rl_src, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (rl_src.location == kLocPhysReg) {
     NewLIR2(kX86MovsxdRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index e369d26..408a40a 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -2150,7 +2150,8 @@
   if (in_to_reg_storage_mapping.IsThereStackMapped()) {
     RegStorage regSingle = TargetReg(kArg2);
     RegStorage regWide = RegStorage::Solo64(TargetReg(kArg3).GetReg());
-    for (int i = start_index; i <= last_mapped_in + regs_left_to_pass_via_stack; i++) {
+    for (int i = start_index;
+         i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
       RegLocation rl_arg = info->args[i];
       rl_arg = UpdateRawLoc(rl_arg);
       RegStorage reg = in_to_reg_storage_mapping.Get(i);
@@ -2166,7 +2167,6 @@
               LoadValueDirectWideFixed(rl_arg, regWide);
               StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
             }
-            i++;
           } else {
             if (rl_arg.location == kLocPhysReg) {
               StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
@@ -2179,6 +2179,9 @@
         call_state = next_call_insn(cu_, info, call_state, target_method,
                                     vtable_idx, direct_code, direct_method, type);
       }
+      if (rl_arg.wide) {
+        i++;
+      }
     }
   }
 
@@ -2190,13 +2193,15 @@
     if (reg.Valid()) {
       if (rl_arg.wide) {
         LoadValueDirectWideFixed(rl_arg, reg);
-        i++;
       } else {
         LoadValueDirectFixed(rl_arg, reg);
       }
       call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
     }
+    if (rl_arg.wide) {
+      i++;
+    }
   }
 
   call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
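Note on the target_x86.cc hunks above: a wide (64-bit) argument occupies two virtual-register slots, so the index must advance by two per wide value regardless of whether the value was register-mapped, and the stack-mapped loop's bound must cover the width of the last mapped argument. A sketch of the iteration pattern under that assumption (Arg and WalkArgs are illustrative, not ART code):

```cpp
#include <cstdio>
#include <vector>

// Hypothetical argument record: 'wide' values (long/double) occupy two
// virtual-register slots.
struct Arg { bool wide; };

void WalkArgs(const std::vector<Arg>& args) {
  for (size_t i = 0; i < args.size(); i++) {
    const Arg& a = args[i];
    std::printf("arg slot %zu%s\n", i, a.wide ? " (wide, uses 2 slots)" : "");
    // Advance past the high half unconditionally, mirroring the diff:
    // the extra i++ must happen whether or not the value got a mapped
    // register, which is why it was hoisted out of the mapped branch.
    if (a.wide) {
      i++;
    }
  }
}

int main() {
  WalkArgs({{false}, {true}, {false}});
  return 0;
}
```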
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index bfabc5a..a7283ab 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -318,4 +318,42 @@
   ASSERT_EQ(phi_interval->GetRegister(), ret->InputAt(0)->GetLiveInterval()->GetRegister());
 }
 
+TEST(RegisterAllocatorTest, FirstRegisterUse) {
+  const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::ADD_INT_LIT8 | 1 << 8, 1 << 8,
+    Instruction::ADD_INT_LIT8 | 0 << 8, 1 << 8,
+    Instruction::ADD_INT_LIT8 | 1 << 8, 1 << 8 | 1,
+    Instruction::RETURN_VOID);
+
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraph* graph = BuildSSAGraph(data, &allocator);
+  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kArm);
+  SsaLivenessAnalysis liveness(*graph, codegen);
+  liveness.Analyze();
+
+  HAdd* first_add = graph->GetBlocks().Get(1)->GetFirstInstruction()->AsAdd();
+  HAdd* last_add = graph->GetBlocks().Get(1)->GetLastInstruction()->GetPrevious()->AsAdd();
+  ASSERT_EQ(last_add->InputAt(0), first_add);
+  LiveInterval* interval = first_add->GetLiveInterval();
+  ASSERT_EQ(interval->GetEnd(), last_add->GetLifetimePosition() + 1);
+  ASSERT_TRUE(interval->GetNextSibling() == nullptr);
+
+  // We need a register for the output of the instruction.
+  ASSERT_EQ(interval->FirstRegisterUse(), first_add->GetLifetimePosition());
+
+  // Split at the next instruction.
+  interval = interval->SplitAt(first_add->GetLifetimePosition() + 2);
+  // The user of the split is the last add.
+  ASSERT_EQ(interval->FirstRegisterUse(), last_add->GetLifetimePosition() + 1);
+
+  // Split before the last add.
+  LiveInterval* new_interval = interval->SplitAt(last_add->GetLifetimePosition() - 1);
+  // Ensure the current interval has no register use...
+  ASSERT_EQ(interval->FirstRegisterUse(), kNoLifetime);
+  // And the new interval has it for the last add.
+  ASSERT_EQ(new_interval->FirstRegisterUse(), last_add->GetLifetimePosition() + 1);
+}
+
 }  // namespace art
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index fc3eb66..83035b5 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -289,21 +289,23 @@
 
   size_t FirstRegisterUseAfter(size_t position) const {
     if (position == GetStart() && defined_by_ != nullptr) {
-      Location location = defined_by_->GetLocations()->Out();
+      LocationSummary* locations = defined_by_->GetLocations();
+      Location location = locations->Out();
       // This interval is the first interval of the instruction. If the output
       // of the instruction requires a register, we return the position of that instruction
       // as the first register use.
       if (location.IsUnallocated()) {
         if ((location.GetPolicy() == Location::kRequiresRegister)
              || (location.GetPolicy() == Location::kSameAsFirstInput
-                && defined_by_->GetLocations()->InAt(0).GetPolicy() == Location::kRequiresRegister)) {
+                 && locations->InAt(0).GetPolicy() == Location::kRequiresRegister)) {
           return position;
         }
       }
     }
 
     UsePosition* use = first_use_;
-    while (use != nullptr) {
+    size_t end = GetEnd();
+    while (use != nullptr && use->GetPosition() <= end) {
       size_t use_position = use->GetPosition();
       if (use_position >= position && !use->GetIsEnvironment()) {
         Location location = use->GetUser()->GetLocations()->InAt(use->GetInputIndex());
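Note on the ssa_liveness_analysis.h hunk above: the new `use->GetPosition() <= end` bound matters once an interval has been split, since the siblings produced by SplitAt evidently share one use list (see the new FirstRegisterUse test earlier in this patch); without the bound, a sibling would report a use that belongs to a later sibling. A pared-down sketch of the bounded scan, with a hypothetical UsePosition list:

```cpp
#include <cassert>
#include <cstddef>

constexpr size_t kNoLifetime = static_cast<size_t>(-1);

// Hypothetical, pared-down use list: split siblings share it, so each
// sibling must ignore uses past its own end (the bug the diff fixes).
struct UsePosition {
  size_t position;
  UsePosition* next;
};

size_t FirstUseAfter(const UsePosition* use, size_t position, size_t end) {
  while (use != nullptr && use->position <= end) {  // the added bound
    if (use->position >= position) {
      return use->position;
    }
    use = use->next;
  }
  return kNoLifetime;
}

int main() {
  UsePosition late{20, nullptr};  // belongs to a later sibling
  UsePosition early{8, &late};    // belongs to this sibling
  // Sibling covering up to position 10: without the '<= end' bound the
  // scan would return 20, a use outside this sibling.
  assert(FirstUseAfter(&early, 9, 10) == kNoLifetime);
  assert(FirstUseAfter(&early, 4, 10) == 8);
  return 0;
}
```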
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 22b8cca..a31c08b 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -258,7 +258,7 @@
           "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
           "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
           "memory");  // clobber.
-#elif defined(__x86_64__)
+#elif defined(__x86_64__) && !defined(__APPLE__)
     // Note: Uses the native convention
     // TODO: Set the thread?
     __asm__ __volatile__(
@@ -483,7 +483,7 @@
           "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
           "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
           "memory");  // clobber.
-#elif defined(__x86_64__)
+#elif defined(__x86_64__) && !defined(__APPLE__)
     // Note: Uses the native convention
     // TODO: Set the thread?
     __asm__ __volatile__(
@@ -518,7 +518,7 @@
   // Method with 32b arg0, 64b arg1
   size_t Invoke3UWithReferrer(size_t arg0, uint64_t arg1, uintptr_t code, Thread* self,
                               mirror::ArtMethod* referrer) {
-#if defined(__x86_64__) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
     // Just pass through.
     return Invoke3WithReferrer(arg0, arg1, 0U, code, self, referrer);
 #else
@@ -533,7 +533,7 @@
   // Method with 32b arg0, 32b arg1, 64b arg2
   size_t Invoke3UUWithReferrer(uint32_t arg0, uint32_t arg1, uint64_t arg2, uintptr_t code,
                                Thread* self, mirror::ArtMethod* referrer) {
-#if defined(__x86_64__) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
     // Just pass through.
     return Invoke3WithReferrer(arg0, arg1, arg2, code, self, referrer);
 #else
@@ -547,12 +547,12 @@
 };
 
 
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_memcpy(void);
 #endif
 
 TEST_F(StubTest, Memcpy) {
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
   Thread* self = Thread::Current();
 
   uint32_t orig[20];
@@ -588,12 +588,12 @@
 #endif
 }
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_lock_object(void);
 #endif
 
 TEST_F(StubTest, LockObject) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   static constexpr size_t kThinLockLoops = 100;
 
   Thread* self = Thread::Current();
@@ -664,14 +664,14 @@
 };
 
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_lock_object(void);
 extern "C" void art_quick_unlock_object(void);
 #endif
 
 // NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
 static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   static constexpr size_t kThinLockLoops = 100;
 
   Thread* self = Thread::Current();
@@ -817,12 +817,12 @@
   TestUnlockObject(this);
 }
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_check_cast(void);
 #endif
 
 TEST_F(StubTest, CheckCast) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   Thread* self = Thread::Current();
   // Find some classes.
   ScopedObjectAccess soa(self);
@@ -867,7 +867,7 @@
 }
 
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_aput_obj_with_null_and_bound_check(void);
 // Do not check non-checked ones, we'd need handlers and stuff...
 #endif
@@ -875,7 +875,7 @@
 TEST_F(StubTest, APutObj) {
   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   Thread* self = Thread::Current();
   // Create an object
   ScopedObjectAccess soa(self);
@@ -1003,7 +1003,7 @@
 TEST_F(StubTest, AllocObject) {
   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   // TODO: Check the "Unresolved" allocation stubs
 
   Thread* self = Thread::Current();
@@ -1125,7 +1125,7 @@
 TEST_F(StubTest, AllocObjectArray) {
   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   // TODO: Check the "Unresolved" allocation stubs
 
   Thread* self = Thread::Current();
@@ -1204,14 +1204,14 @@
 }
 
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_string_compareto(void);
 #endif
 
 TEST_F(StubTest, StringCompareTo) {
   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   // TODO: Check the "Unresolved" allocation stubs
 
   Thread* self = Thread::Current();
@@ -1301,7 +1301,7 @@
 }
 
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_set32_static(void);
 extern "C" void art_quick_get32_static(void);
 #endif
@@ -1309,7 +1309,7 @@
 static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
                            mirror::ArtMethod* referrer, StubTest* test)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   constexpr size_t num_values = 7;
   uint32_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
 
@@ -1337,7 +1337,7 @@
 }
 
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_set32_instance(void);
 extern "C" void art_quick_get32_instance(void);
 #endif
@@ -1345,7 +1345,7 @@
 static void GetSet32Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
                              Thread* self, mirror::ArtMethod* referrer, StubTest* test)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   constexpr size_t num_values = 7;
   uint32_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
 
@@ -1379,7 +1379,7 @@
 }
 
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_set_obj_static(void);
 extern "C" void art_quick_get_obj_static(void);
 
@@ -1406,7 +1406,7 @@
 static void GetSetObjStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
                             mirror::ArtMethod* referrer, StubTest* test)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);
 
   // Allocate a string object for simplicity.
@@ -1422,7 +1422,7 @@
 }
 
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_set_obj_instance(void);
 extern "C" void art_quick_get_obj_instance(void);
 
@@ -1453,7 +1453,7 @@
 static void GetSetObjInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
                               Thread* self, mirror::ArtMethod* referrer, StubTest* test)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
 
   // Allocate a string object for simplicity.
@@ -1471,7 +1471,7 @@
 
 // TODO: Complete these tests for 32b architectures.
 
-#if defined(__x86_64__) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
 extern "C" void art_quick_set64_static(void);
 extern "C" void art_quick_get64_static(void);
 #endif
@@ -1479,7 +1479,7 @@
 static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
                            mirror::ArtMethod* referrer, StubTest* test)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__x86_64__) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
   constexpr size_t num_values = 8;
   uint64_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
 
@@ -1506,7 +1506,7 @@
 }
 
 
-#if defined(__x86_64__) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
 extern "C" void art_quick_set64_instance(void);
 extern "C" void art_quick_get64_instance(void);
 #endif
@@ -1514,7 +1514,7 @@
 static void GetSet64Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
                              Thread* self, mirror::ArtMethod* referrer, StubTest* test)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__x86_64__) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
   constexpr size_t num_values = 8;
   uint64_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
 
@@ -1678,12 +1678,12 @@
   TestFields(self, this, Primitive::Type::kPrimLong);
 }
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_imt_conflict_trampoline(void);
 #endif
 
 TEST_F(StubTest, IMT) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
 
   Thread* self = Thread::Current();
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index 34c8b82..d3cd4df 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -19,7 +19,7 @@
 
 #include "asm_support_x86_64.h"
 
-#if defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5)
+#if defined(__APPLE__) || (defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5))
     // Clang's as(1) doesn't let you name macro parameters prior to 3.5.
     #define MACRO0(macro_name) .macro macro_name
     #define MACRO1(macro_name, macro_arg1) .macro macro_name
@@ -27,13 +27,12 @@
     #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name
     #define END_MACRO .endmacro
 
-    // Clang's as(1) uses $0, $1, and so on for macro arguments prior to 3.5.
+    // Clang's as(1) uses $0, $1, and so on for macro arguments.
+    #define RAW_VAR(name,index) $index
     #define VAR(name,index) SYMBOL($index)
-    #define PLT_VAR(name, index) SYMBOL($index)@PLT
+    #define PLT_VAR(name, index) PLT_SYMBOL($index)
     #define REG_VAR(name,index) %$index
     #define CALL_MACRO(name,index) $index
-    #define FUNCTION_TYPE(name,index) .type $index, @function
-    #define SIZE(name,index) .size $index, .-$index
 
     //  The use of $x for arguments mean that literals need to be represented with $$x in macros.
     #define LITERAL(value) $value
@@ -52,17 +51,27 @@
     // no special meaning to $, so literals are still just $x. The use of altmacro means % is a
     // special character meaning care needs to be taken when passing registers as macro arguments.
     .altmacro
+    #define RAW_VAR(name,index) name&
     #define VAR(name,index) name&
     #define PLT_VAR(name, index) name&@PLT
     #define REG_VAR(name,index) %name
     #define CALL_MACRO(name,index) name&
-    #define FUNCTION_TYPE(name,index) .type name&, @function
-    #define SIZE(name,index) .size name, .-name
 
     #define LITERAL(value) $value
     #define MACRO_LITERAL(value) $value
 #endif
 
+#if defined(__APPLE__)
+    #define FUNCTION_TYPE(name,index)
+    #define SIZE(name,index)
+#elif defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5)
+    #define FUNCTION_TYPE(name,index) .type $index, @function
+    #define SIZE(name,index) .size $index, .-$index
+#else
+    #define FUNCTION_TYPE(name,index) .type name&, @function
+    #define SIZE(name,index) .size name, .-name
+#endif
+
     // CFI support.
 #if !defined(__APPLE__)
     #define CFI_STARTPROC .cfi_startproc
@@ -86,9 +95,14 @@
     // Symbols.
 #if !defined(__APPLE__)
     #define SYMBOL(name) name
-    #define PLT_SYMBOL(name) name ## @PLT
+    #if defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5)
+        // TODO: Disabled for old clang 3.3; this leads to text relocations, and there should be
+        // a better fix.
+        #define PLT_SYMBOL(name) name // ## @PLT
+    #else
+        #define PLT_SYMBOL(name) name ## @PLT
+    #endif
 #else
-    // Mac OS' symbols have an _ prefix.
     #define SYMBOL(name) _ ## name
     #define PLT_SYMBOL(name) _ ## name
 #endif
@@ -103,8 +117,6 @@
     .globl VAR(c_name, 0)
     ALIGN_FUNCTION_ENTRY
 VAR(c_name, 0):
-    // Have a local entrypoint that's not globl
-VAR(c_name, 0)_local:
     CFI_STARTPROC
     // Ensure we get a sane starting CFA.
     CFI_DEF_CFA(rsp, 8)
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 0ccbd27..e1f47ee 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -59,8 +59,8 @@
     size_t j = 2;  // Offset j to skip return address spill.
     for (size_t i = 0; i < kNumberOfFloatRegisters; ++i) {
       if (((frame_info.FpSpillMask() >> i) & 1) != 0) {
-        fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j,
-                                        frame_info.FrameSizeInBytes());
+        fprs_[i] = reinterpret_cast<uint64_t*>(
+            fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_info.FrameSizeInBytes()));
         j++;
       }
     }
@@ -93,7 +93,7 @@
 
 bool X86_64Context::SetFPR(uint32_t reg, uintptr_t value) {
   CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
-  CHECK_NE(fprs_[reg], &gZero);
+  CHECK_NE(fprs_[reg], reinterpret_cast<const uint64_t*>(&gZero));
   if (fprs_[reg] != nullptr) {
     *fprs_[reg] = value;
     return true;
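Note on the context_x86_64.cc hunk above: the casts are needed because `fprs_` holds `uint64_t*` while `CalleeSaveAddress` and `gZero` apparently deal in `uintptr_t`. On LP64 Linux those usually share an underlying type, but on Mac OS `uint64_t` is `unsigned long long` while `uintptr_t` is `unsigned long`: same width, distinct types, so the pointer types are unrelated without a `reinterpret_cast`. A one-liner to observe the difference:

```cpp
#include <cstdint>
#include <type_traits>

int main() {
  // true on typical LP64 Linux (both 'unsigned long'), false on LP64
  // Mac OS, where uint64_t is 'unsigned long long' but uintptr_t is
  // 'unsigned long'. When false, uint64_t* and uintptr_t* do not
  // convert implicitly, hence the casts in the diff.
  return std::is_same<uint64_t, uintptr_t>::value ? 0 : 1;
}
```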
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 92aabee..b6f51f7 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -112,6 +112,9 @@
 
 void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
                      PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
+#if defined(__APPLE__)
+  UNIMPLEMENTED(FATAL);
+#else
   // Interpreter
   ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
   ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
@@ -216,6 +219,7 @@
   qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
   qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
   qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
+#endif  // __APPLE__
 };
 
 }  // namespace art
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index c9220c8..668fb88 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -23,6 +23,10 @@
      * Runtime::CreateCalleeSaveMethod(kSaveAll)
      */
 MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     // R10 := Runtime::Current()
     movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
     movq (%r10), %r10
@@ -45,6 +49,7 @@
 #if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 6*8 + 8 + 8)
 #error "SAVE_ALL_CALLEE_SAVE_FRAME(X86_64) size not as expected."
 #endif
+#endif  // __APPLE__
 END_MACRO
 
     /*
@@ -52,6 +57,10 @@
      * Runtime::CreateCalleeSaveMethod(kRefsOnly)
      */
 MACRO0(SETUP_REF_ONLY_CALLEE_SAVE_FRAME)
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     // R10 := Runtime::Current()
     movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
     movq (%r10), %r10
@@ -74,6 +83,7 @@
 #if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 6*8 + 8 + 8)
 #error "REFS_ONLY_CALLEE_SAVE_FRAME(X86_64) size not as expected."
 #endif
+#endif  // __APPLE__
 END_MACRO
 
 MACRO0(RESTORE_REF_ONLY_CALLEE_SAVE_FRAME)
@@ -93,6 +103,10 @@
      * Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
      */
 MACRO0(SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME)
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     // R10 := Runtime::Current()
     movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
     movq (%r10), %r10
@@ -130,6 +144,7 @@
 #if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 11*8 + 80 + 8)
 #error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(X86_64) size not as expected."
 #endif
+#endif  // __APPLE__
 END_MACRO
 
 MACRO0(RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME)
@@ -366,6 +381,10 @@
      *   r9 = char* shorty
      */
 DEFINE_FUNCTION art_quick_invoke_stub
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     // Set up argument XMM registers.
     leaq 1(%r9), %r10             // R10 := shorty + 1  ; ie skip return arg character.
     leaq 4(%rsi), %r11            // R11 := arg_array + 4 ; ie skip this pointer.
@@ -431,6 +450,7 @@
 .Lreturn_float_quick:
     movss %xmm0, (%r8)           // Store the floating point result.
     ret
+#endif  // __APPLE__
 END_FUNCTION art_quick_invoke_stub
 
     /*
@@ -445,6 +465,10 @@
      *   r9 = char* shorty
      */
 DEFINE_FUNCTION art_quick_invoke_static_stub
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     // Set up argument XMM registers.
     leaq 1(%r9), %r10             // R10 := shorty + 1  ; ie skip return arg character
     movq %rsi, %r11               // R11 := arg_array
@@ -509,6 +533,7 @@
 .Lreturn_float_quick2:
     movss %xmm0, (%r8)           // Store the floating point result.
     ret
+#endif  // __APPLE__
 END_FUNCTION art_quick_invoke_static_stub
 
 MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
@@ -559,6 +584,45 @@
     END_FUNCTION VAR(c_name, 0)
 END_MACRO
 
+MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
+    DEFINE_FUNCTION VAR(c_name, 0)
+    movl 8(%rsp), %esi                 // pass referrer
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+                                       // arg0 is in rdi
+    movq %gs:THREAD_SELF_OFFSET, %rdx  // pass Thread::Current()
+    movq %rsp, %rcx                    // pass SP
+    call PLT_VAR(cxx_name, 1)          // cxx_name(arg0, referrer, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+    CALL_MACRO(return_macro, 2)
+    END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
+    DEFINE_FUNCTION VAR(c_name, 0)
+    movl 8(%rsp), %edx                 // pass referrer
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+                                       // arg0 and arg1 are in rdi/rsi
+    movq %gs:THREAD_SELF_OFFSET, %rcx  // pass Thread::Current()
+    movq %rsp, %r8                     // pass SP
+    call PLT_VAR(cxx_name, 1)          // (arg0, arg1, referrer, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+    CALL_MACRO(return_macro, 2)
+    END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
+    DEFINE_FUNCTION VAR(c_name, 0)
+    movl 8(%rsp), %ecx                 // pass referrer
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+                                       // arg0, arg1, and arg2 are in rdi/rsi/rdx
+    movq %gs:THREAD_SELF_OFFSET, %r8    // pass Thread::Current()
+    movq %rsp, %r9                     // pass SP
+    call PLT_VAR(cxx_name, 1)          // cxx_name(arg0, arg1, arg2, referrer, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+    CALL_MACRO(return_macro, 2)        // return or deliver exception
+    END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
 MACRO0(RETURN_IF_RESULT_IS_NON_ZERO)
     testq %rax, %rax               // rax == 0 ?
     jz  1f                         // if rax == 0 goto 1
@@ -783,14 +847,23 @@
      * rdi(edi) = array, rsi(esi) = index, rdx(edx) = value
      */
 DEFINE_FUNCTION art_quick_aput_obj_with_null_and_bound_check
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     testl %edi, %edi
 //  testq %rdi, %rdi
     jnz art_quick_aput_obj_with_bound_check_local
     jmp art_quick_throw_null_pointer_exception_local
+#endif  // __APPLE__
 END_FUNCTION art_quick_aput_obj_with_null_and_bound_check
 
 
 DEFINE_FUNCTION art_quick_aput_obj_with_bound_check
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     movl ARRAY_LENGTH_OFFSET(%edi), %ecx
 //  movl ARRAY_LENGTH_OFFSET(%rdi), %ecx      // This zero-extends, so value(%rcx)=value(%ecx)
     cmpl %ecx, %esi
@@ -800,6 +873,7 @@
     mov %ecx, %esi
 //  mov %rcx, %rsi
     jmp art_quick_throw_array_bounds_local
+#endif  // __APPLE__
 END_FUNCTION art_quick_aput_obj_with_bound_check
 
 
@@ -894,47 +968,6 @@
 UNIMPLEMENTED art_quick_lshr
 UNIMPLEMENTED art_quick_lushr
 
-
-MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
-    DEFINE_FUNCTION VAR(c_name, 0)
-    movl 8(%rsp), %esi                 // pass referrer
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME
-                                       // arg0 is in rdi
-    movq %gs:THREAD_SELF_OFFSET, %rdx  // pass Thread::Current()
-    movq %rsp, %rcx                    // pass SP
-    call PLT_VAR(cxx_name, 1)          // cxx_name(arg0, referrer, Thread*, SP)
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
-    CALL_MACRO(return_macro, 2)
-    END_FUNCTION VAR(c_name, 0)
-END_MACRO
-
-MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
-    DEFINE_FUNCTION VAR(c_name, 0)
-    movl 8(%rsp), %edx                 // pass referrer
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME
-                                       // arg0 and arg1 are in rdi/rsi
-    movq %gs:THREAD_SELF_OFFSET, %rcx  // pass Thread::Current()
-    movq %rsp, %r8                     // pass SP
-    call PLT_VAR(cxx_name, 1)          // (arg0, arg1, referrer, Thread*, SP)
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
-    CALL_MACRO(return_macro, 2)
-    END_FUNCTION VAR(c_name, 0)
-END_MACRO
-
-MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
-    DEFINE_FUNCTION VAR(c_name, 0)
-    movl 8(%rsp), %ecx                 // pass referrer
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME
-                                       // arg0, arg1, and arg2 are in rdi/rsi/rdx
-    movq %gs:THREAD_SELF_OFFSET, %r8    // pass Thread::Current()
-    movq %rsp, %r9                     // pass SP
-    call PLT_VAR(cxx_name, 1)          // cxx_name(arg0, arg1, arg2, referrer, Thread*, SP)
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
-    CALL_MACRO(return_macro, 2)        // return or deliver exception
-    END_FUNCTION VAR(c_name, 0)
-END_MACRO
-
-
 THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_EAX_ZERO
 THREE_ARG_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_EAX_ZERO
 THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_EAX_ZERO
@@ -1006,10 +1039,15 @@
      * rax is a hidden argument that holds the target method's dex method index.
      */
 DEFINE_FUNCTION art_quick_imt_conflict_trampoline
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     movl 8(%rsp), %edi            // load caller Method*
     movl METHOD_DEX_CACHE_METHODS_OFFSET(%rdi), %edi  // load dex_cache_resolved_methods
     movl OBJECT_ARRAY_DATA_OFFSET(%rdi, %rax, 4), %edi  // load the target method
     jmp art_quick_invoke_interface_trampoline_local
+#endif  // __APPLE__
 END_FUNCTION art_quick_imt_conflict_trampoline
 
 DEFINE_FUNCTION art_quick_resolution_trampoline
@@ -1294,6 +1332,10 @@
      * Routine that intercepts method calls and returns.
      */
 DEFINE_FUNCTION art_quick_instrumentation_entry
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
 
     movq %rdi, %r12               // Preserve method pointer in a callee-save.
@@ -1313,6 +1355,7 @@
     RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
 
     jmp *%rax                     // Tail call to intended method.
+#endif  // __APPLE__
 END_FUNCTION art_quick_instrumentation_entry
 
 DEFINE_FUNCTION art_quick_instrumentation_exit
diff --git a/runtime/arch/x86_64/thread_x86_64.cc b/runtime/arch/x86_64/thread_x86_64.cc
index b7a5c43..6dff2b4 100644
--- a/runtime/arch/x86_64/thread_x86_64.cc
+++ b/runtime/arch/x86_64/thread_x86_64.cc
@@ -21,18 +21,28 @@
 #include "thread-inl.h"
 #include "thread_list.h"
 
+#if defined(__linux__)
 #include <asm/prctl.h>
 #include <sys/prctl.h>
 #include <sys/syscall.h>
+#endif
 
 namespace art {
 
+#if defined(__linux__)
 static void arch_prctl(int code, void* val) {
   syscall(__NR_arch_prctl, code, val);
 }
+#endif
+
 void Thread::InitCpu() {
   MutexLock mu(nullptr, *Locks::modify_ldt_lock_);
+
+#if defined(__linux__)
   arch_prctl(ARCH_SET_GS, this);
+#else
+  UNIMPLEMENTED(FATAL) << "Need to set GS";
+#endif
 
   // Allow easy indirection back to Thread*.
   tlsPtr_.self = this;
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index a816489..fefb907 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -1757,9 +1757,7 @@
     CHECK_JNI_ENTRY(kFlag_Default, "EpJ", env, address, capacity);
     if (address == nullptr) {
       JniAbortF(__FUNCTION__, "non-nullable address is NULL");
-    }
-    if (capacity < 0) {
-      JniAbortF(__FUNCTION__, "capacity must be non-negative: %" PRId64, capacity);
+      return nullptr;
     }
     return CHECK_JNI_EXIT("L", baseEnv(env)->NewDirectByteBuffer(env, address, capacity));
   }
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 3301254..dde74de 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -154,10 +154,12 @@
 }
 
 // Generate the entrypoint functions.
+#if !defined(__APPLE__) || !defined(__LP64__)
 GENERATE_ENTRYPOINTS(_dlmalloc);
 GENERATE_ENTRYPOINTS(_rosalloc);
 GENERATE_ENTRYPOINTS(_bump_pointer);
 GENERATE_ENTRYPOINTS(_tlab);
+#endif
 
 static bool entry_points_instrumented = false;
 static gc::AllocatorType entry_points_allocator = gc::kAllocatorTypeDlMalloc;
@@ -172,6 +174,7 @@
 
 void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
   switch (entry_points_allocator) {
+#if !defined(__APPLE__) || !defined(__LP64__)
     case gc::kAllocatorTypeDlMalloc: {
       SetQuickAllocEntryPoints_dlmalloc(qpoints, entry_points_instrumented);
       break;
@@ -190,6 +193,7 @@
       SetQuickAllocEntryPoints_tlab(qpoints, entry_points_instrumented);
       break;
     }
+#endif
     default: {
       LOG(FATAL) << "Unimplemented";
     }
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 513b409..8842f59 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2447,13 +2447,18 @@
   static jobject NewDirectByteBuffer(JNIEnv* env, void* address, jlong capacity) {
     if (capacity < 0) {
       JniAbortF("NewDirectByteBuffer", "negative buffer capacity: %" PRId64, capacity);
+      return nullptr;
     }
     if (address == nullptr && capacity != 0) {
       JniAbortF("NewDirectByteBuffer", "non-zero capacity for nullptr pointer: %" PRId64, capacity);
+      return nullptr;
     }
 
-    // At the moment, the capacity is limited to 32 bits.
-    CHECK_LE(capacity, 0xffffffff);
+    // At the moment, the capacity of DirectByteBuffer is limited to a signed int.
+    if (capacity > INT_MAX) {
+      JniAbortF("NewDirectByteBuffer", "buffer capacity greater than maximum jint: %" PRId64, capacity);
+      return nullptr;
+    }
     jlong address_arg = reinterpret_cast<jlong>(address);
     jint capacity_arg = static_cast<jint>(capacity);
 
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 5e46c57..a933f86 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -1515,6 +1515,12 @@
   ASSERT_TRUE(env_->IsInstanceOf(buffer, buffer_class));
   ASSERT_EQ(env_->GetDirectBufferAddress(buffer), bytes);
   ASSERT_EQ(env_->GetDirectBufferCapacity(buffer), static_cast<jlong>(sizeof(bytes)));
+
+  {
+    CheckJniAbortCatcher check_jni_abort_catcher;
+    env_->NewDirectByteBuffer(bytes, static_cast<jlong>(INT_MAX) + 1);
+    check_jni_abort_catcher.Check("in call to NewDirectByteBuffer");
+  }
 }
 
 TEST_F(JniInternalTest, MonitorEnterExit) {
@@ -1568,7 +1574,6 @@
     CheckJniAbortCatcher check_jni_abort_catcher;
     env_->MonitorEnter(nullptr);
     check_jni_abort_catcher.Check("in call to MonitorEnter");
-
     env_->MonitorExit(nullptr);
     check_jni_abort_catcher.Check("in call to MonitorExit");
   }
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 8d987df..1074253 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -72,7 +72,7 @@
 
 std::multimap<void*, MemMap*> MemMap::maps_;
 
-#if defined(__LP64__) && !defined(__x86_64__)
+#if USE_ART_LOW_4G_ALLOCATOR
 // Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.
 
 // The regular start of memory allocations. The first 64KB is protected by SELinux.
@@ -235,7 +235,7 @@
   // A page allocator would be a useful abstraction here, as
   // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
   // 2) The linear scheme, even with simple saving of the last known position, is very crude
-#if defined(__LP64__) && !defined(__x86_64__)
+#if USE_ART_LOW_4G_ALLOCATOR
   // MAP_32BIT only available on x86_64.
   void* actual = MAP_FAILED;
   if (low_4gb && expected == nullptr) {
@@ -299,7 +299,7 @@
   }
 
 #else
-#ifdef __x86_64__
+#if defined(__LP64__)
   if (low_4gb && expected == nullptr) {
     flags |= MAP_32BIT;
   }
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index e42251c..defa6a5 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -30,6 +30,12 @@
 
 namespace art {
 
+#if defined(__LP64__) && (!defined(__x86_64__) || defined(__APPLE__))
+#define USE_ART_LOW_4G_ALLOCATOR 1
+#else
+#define USE_ART_LOW_4G_ALLOCATOR 0
+#endif
+
 #ifdef __linux__
 static constexpr bool kMadviseZeroes = true;
 #else
@@ -147,8 +153,8 @@
   size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
   int prot_;  // Protection of the map.
 
-#if defined(__LP64__) && !defined(__x86_64__)
-  static uintptr_t next_mem_pos_;   // next memory location to check for low_4g extent
+#if USE_ART_LOW_4G_ALLOCATOR
+  static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
 #endif
 
   // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
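Note on the mem_map.cc/mem_map.h hunks above: USE_ART_LOW_4G_ALLOCATOR names the existing condition "LP64 but MAP_32BIT unavailable" and extends it to x86-64 Mac OS, where MAP_32BIT does not exist. A minimal sketch of the two placement strategies the macro selects between (MapLow4GB is illustrative, not MemMap's API; MAP_ANONYMOUS availability varies by platform):

```cpp
#include <sys/mman.h>
#include <cstddef>

// On Linux x86-64 the kernel can place the mapping under 4GB via
// MAP_32BIT; every other 64-bit target (arm64, and now x86-64 Mac OS)
// has to probe candidate addresses by hand, as MemMap does with
// next_mem_pos_.
void* MapLow4GB(size_t length) {
#if defined(__x86_64__) && !defined(__APPLE__)
  return mmap(nullptr, length, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
#else
  // USE_ART_LOW_4G_ALLOCATOR path: a linear scan of addresses below
  // 4GB would go here.
  return MAP_FAILED;  // placeholder for the scan
#endif
}

int main() {
  void* p = MapLow4GB(4096);
  if (p != MAP_FAILED) {
    munmap(p, 4096);
  }
  return 0;
}
```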
diff --git a/runtime/thread.cc b/runtime/thread.cc
index e5ae6d0..ca8c2d7 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -974,9 +974,14 @@
   // TODO: we call this code when dying but may not have suspended the thread ourself. The
   //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
   //       the race with the thread_suspend_count_lock_).
-  // No point dumping for an abort in debug builds where we'll hit the not suspended check in stack.
-  bool dump_for_abort = (gAborting > 0) && !kIsDebugBuild;
-  if (this == Thread::Current() || IsSuspended() || dump_for_abort) {
+  bool dump_for_abort = (gAborting > 0);
+  bool safe_to_dump = (this == Thread::Current() || IsSuspended());
+  if (!kIsDebugBuild) {
+    // We always want to dump the stack for an abort. However, there is no point dumping another
+    // thread's stack in debug builds, where we'll hit the not-suspended check in the stack walk.
+    safe_to_dump = (safe_to_dump || dump_for_abort);
+  }
+  if (safe_to_dump) {
     // If we're currently in native code, dump that stack before dumping the managed stack.
     if (dump_for_abort || ShouldShowNativeStack(this)) {
       DumpKernelStack(os, GetTid(), "  kernel: ", false);
diff --git a/test/700-LoadArgRegs/expected.txt b/test/700-LoadArgRegs/expected.txt
index 4908e5b..4977df6 100644
--- a/test/700-LoadArgRegs/expected.txt
+++ b/test/700-LoadArgRegs/expected.txt
@@ -73,3 +73,4 @@
 -81, -82, -83, -84, -85, -86, -87, -88
 -91, -92, -93, -94, -95, -96, -97, -98, -99
 -1, -91, -92, -93, -94, -95, -96, -97, -98, -99
+1, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 2, 3, 4, 5, 6
diff --git a/test/700-LoadArgRegs/src/Main.java b/test/700-LoadArgRegs/src/Main.java
index 281ab16..0e6de73 100644
--- a/test/700-LoadArgRegs/src/Main.java
+++ b/test/700-LoadArgRegs/src/Main.java
@@ -270,8 +270,11 @@
 //        testL2x(100100100100011L, 100100100100011L);
   }
 
-  static public void main(String[] args) throws Exception {
+  static void testMore(int i1, double d1, double d2, double d3, double d4, double d5, double d6, double d7, double d8, double d9, int i2, int i3, int i4, int i5, int i6) {
+    System.out.println(i1+", "+d1+", "+d2+", "+d3+", "+d4+", "+d5+", "+d6+", "+d7+", "+d8+", "+d9+", "+i2+", "+i3+", "+i4+", "+i5+", "+i6);
+  }
 
+  static public void main(String[] args) throws Exception {
     testI();
     testB();
     testO();
@@ -284,5 +287,6 @@
 
     testLL();
 
+    testMore(1, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 2, 3, 4, 5, 6);
   }
 }
diff --git a/test/Android.oat.mk b/test/Android.oat.mk
index 6e43ab6..3cf9f61 100644
--- a/test/Android.oat.mk
+++ b/test/Android.oat.mk
@@ -60,7 +60,10 @@
 ART_TEST_TARGET_OAT_RULES :=
 
 # We need dex2oat and dalvikvm on the target as well as the core image.
-TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUT) $(2ND_TARGET_CORE_IMG_OUT)
+TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUT) $(2ND_TARGET_CORE_IMG_OUT) $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
+ifdef TARGET_2ND_ARCH
+TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libarttest.so
+endif
 
 # Define rule to run an individual oat test on the host. Output from the test is written to the
 # host in /tmp/android-data in a directory named after test's rule name (its target) and the parent
@@ -72,7 +75,7 @@
 # $(3): the target (rule name), e.g. test-art-target-oat-default-HelloWorld64
 # $(4): -Xint or undefined - do we want to run with the interpreter or default.
 define define-test-art-oat-rule-target
-  # Add the test dependencies to test-art-target-sync, which will be a prerequisit for the test
+  # Add the test dependencies to test-art-target-sync, which will be a prerequisite for the test
   # to ensure files are pushed to the device.
   TEST_ART_TARGET_SYNC_DEPS += $$(ART_OAT_TEST_$(1)_DEX)
 
@@ -163,9 +166,16 @@
 # All tests require the host executables, libarttest and the core images.
 ART_TEST_HOST_OAT_DEPENDENCIES := \
   $(ART_HOST_EXECUTABLES) \
-  $(HOST_LIBRARY_PATH)/libarttest$(ART_HOST_SHLIB_EXTENSION) \
-  $(HOST_CORE_IMG_OUT) \
+  $(ART_HOST_LIBRARY_PATH)/libarttest$(ART_HOST_SHLIB_EXTENSION) \
+  $(ART_HOST_LIBRARY_PATH)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
+  $(HOST_CORE_IMG_OUT)
+
+ifneq ($(HOST_PREFER_32_BIT),true)
+ART_TEST_HOST_OAT_DEPENDENCIES += \
+  $(2ND_ART_HOST_LIBRARY_PATH)/libarttest$(ART_HOST_SHLIB_EXTENSION) \
+  $(2ND_ART_HOST_LIBRARY_PATH)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
   $(2ND_HOST_CORE_IMG_OUT)
+endif
 
 # Define rule to run an individual oat test on the host. Output from the test is written to the
 # host in /tmp/android-data in a directory named after test's rule name (its target) and the parent
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index add97be..c2ff98f 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -23,18 +23,18 @@
 
 # Tests that are timing sensitive and flaky on heavily loaded systems.
 TEST_ART_TIMING_SENSITIVE_RUN_TESTS := \
-  test-art-host-default-053-wait-some32 \
-  test-art-host-default-053-wait-some64 \
-  test-art-host-interpreter-053-wait-some32 \
-  test-art-host-interpreter-053-wait-some64 \
-  test-art-host-optimizing-053-wait-some32 \
-  test-art-host-optimizing-053-wait-some64 \
-  test-art-host-default-055-enum-performance32 \
-  test-art-host-default-055-enum-performance64 \
-  test-art-host-interpreter-055-enum-performance32 \
-  test-art-host-interpreter-055-enum-performance64 \
-  test-art-host-optimizing-055-enum-performance32 \
-  test-art-host-optimizing-055-enum-performance64
+  test-art-host-run-test-default-053-wait-some32 \
+  test-art-host-run-test-default-053-wait-some64 \
+  test-art-host-run-test-interpreter-053-wait-some32 \
+  test-art-host-run-test-interpreter-053-wait-some64 \
+  test-art-host-run-test-optimizing-053-wait-some32 \
+  test-art-host-run-test-optimizing-053-wait-some64 \
+  test-art-host-run-test-default-055-enum-performance32 \
+  test-art-host-run-test-default-055-enum-performance64 \
+  test-art-host-run-test-interpreter-055-enum-performance32 \
+  test-art-host-run-test-interpreter-055-enum-performance64 \
+  test-art-host-run-test-optimizing-055-enum-performance32 \
+  test-art-host-run-test-optimizing-055-enum-performance64
 
  # disable timing sensitive tests on "dist" builds.
 ifdef dist_goal
@@ -62,6 +62,7 @@
   TEST_ART_RUN_TEST_BUILD_RULES += $$(dmart_target)
   dmart_target :=
 endef
+$(foreach test, $(TEST_ART_RUN_TESTS), $(eval $(call define-build-art-run-test,$(test))))
 
 include $(CLEAR_VARS)
 LOCAL_MODULE_TAGS := tests
@@ -108,8 +109,14 @@
 # All tests require the host executables and the core images.
 ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \
   $(ART_HOST_EXECUTABLES) \
-  $(HOST_CORE_IMG_OUT) \
+  $(ART_HOST_LIBRARY_PATH)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
+  $(HOST_CORE_IMG_OUT)
+
+ifneq ($(HOST_PREFER_32_BIT),true)
+ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
+  $(2ND_ART_HOST_LIBRARY_PATH)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
   $(2ND_HOST_CORE_IMG_OUT)
+endif
 
 # For a given test create all the combinations of host/target, compiler and suffix such as:
 # test-art-host-run-test-optimizing-003-omnibus-opcodes32