Merge "Avoid verifier crash for quickened invoke on null." into dalvik-dev
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index ea7b0b0..b680b82 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -81,6 +81,19 @@
 include $(BUILD_PHONY_PACKAGE)
 endif
 
+# If we aren't building the host toolchain, skip building the target core.art.
+ifeq ($(WITH_HOST_DALVIK),true)
+ifeq ($(ART_BUILD_TARGET),true)
+include $(CLEAR_VARS)
+LOCAL_MODULE := core.art
+LOCAL_MODULE_TAGS := optional
+LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
+LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.oat.mk
+LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_CORE_IMG_OUT)
+include $(BUILD_PHONY_PACKAGE)
+endif
+endif
+
 ########################################################################
 # The full system boot classpath
 TARGET_BOOT_JARS := $(subst :, ,$(DEXPREOPT_BOOT_JARS))
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 6ea21fc..56facfd 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -44,6 +44,8 @@
   kRet0,
   kRet1,
   kInvokeTgt,
+  kHiddenArg,
+  kHiddenFpArg,
   kCount
 };
 
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 3395ae7..52aba9b 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -74,6 +74,8 @@
     case kRet0: res = rARM_RET0; break;
     case kRet1: res = rARM_RET1; break;
     case kInvokeTgt: res = rARM_INVOKE_TGT; break;
+    case kHiddenArg: res = r12; break;
+    case kHiddenFpArg: res = INVALID_REG; break;
     case kCount: res = rARM_COUNT; break;
   }
   return res;
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 62feade..e8335a4 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -491,69 +491,56 @@
 }
 
 /*
- * All invoke-interface calls bounce off of art_quick_invoke_interface_trampoline,
- * which will locate the target and continue on via a tail call.
+ * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
+ * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
+ * more than one interface method maps to the same index. Note also that we'll load the first
+ * argument ("this") into kArg1 here rather than the standard LoadArgRegs.
  */
 static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
                                  const MethodReference& target_method,
-                                 uint32_t unused, uintptr_t unused2,
-                                 uintptr_t direct_method, InvokeType unused4) {
+                                 uint32_t method_idx, uintptr_t unused,
+                                 uintptr_t direct_method, InvokeType unused2) {
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
-  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
 
-  if (direct_method != 0) {
-    switch (state) {
-      case 0:  // Load the trampoline target [sets kInvokeTgt].
-        if (cu->instruction_set != kX86) {
-          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(),
-                           cg->TargetReg(kInvokeTgt));
-        }
-        // Get the interface Method* [sets kArg0]
-        if (direct_method != static_cast<unsigned int>(-1)) {
-          cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
-        } else {
-          CHECK_EQ(cu->dex_file, target_method.dex_file);
-          LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
-                                                 target_method.dex_method_index, 0);
-          if (data_target == NULL) {
-            data_target = cg->AddWordData(&cg->method_literal_list_,
-                                          target_method.dex_method_index);
-            data_target->operands[1] = kInterface;
-          }
-          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
-          cg->AppendLIR(load_pc_rel);
-          DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
-        }
-        break;
-      default:
-        return -1;
-    }
-  } else {
-    switch (state) {
-      case 0:
-        // Get the current Method* [sets kArg0] - TUNING: remove copy of method if it is promoted.
-        cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
-        // Load the trampoline target [sets kInvokeTgt].
-        if (cu->instruction_set != kX86) {
-          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(),
-                           cg->TargetReg(kInvokeTgt));
-        }
-        break;
-    case 1:  // Get method->dex_cache_resolved_methods_ [set/use kArg0]
-      cg->LoadWordDisp(cg->TargetReg(kArg0),
-                       mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
-                       cg->TargetReg(kArg0));
-      break;
-    case 2:  // Grab target method* [set/use kArg0]
+  switch (state) {
+    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
       CHECK_EQ(cu->dex_file, target_method.dex_file);
-      cg->LoadWordDisp(cg->TargetReg(kArg0),
-                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
-                           (target_method.dex_method_index * 4),
+      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
+      cg->LoadConstant(cg->TargetReg(kHiddenArg), target_method.dex_method_index);
+      if (cu->instruction_set == kX86) {
+        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg), cg->TargetReg(kHiddenArg));
+      }
+      break;
+    case 1: {  // Get "this" [set kArg1]
+      RegLocation  rl_arg = info->args[0];
+      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
+      break;
+    }
+    case 2:  // Is "this" null? [use kArg1]
+      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
+      // Get this->klass_ [use kArg1, set kInvokeTgt]
+      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
+                       cg->TargetReg(kInvokeTgt));
+      break;
+    case 3:  // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt]
+      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
+                       cg->TargetReg(kInvokeTgt));
+      break;
+    case 4:  // Get target method [use kInvokeTgt, set kArg0]
+      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), ((method_idx % ClassLinker::kImtSize) * 4) +
+                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                        cg->TargetReg(kArg0));
       break;
+    case 5:  // Get the compiled code address [use kArg0, set kInvokeTgt]
+      if (cu->instruction_set != kX86) {
+        cg->LoadWordDisp(cg->TargetReg(kArg0),
+                         mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
+                         cg->TargetReg(kInvokeTgt));
+        break;
+      }
+      // Intentional fallthrough for X86
     default:
       return -1;
-    }
   }
   return state + 1;
 }
@@ -971,6 +958,29 @@
   return true;
 }
 
+bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
+  if (cu_->instruction_set == kMips) {
+    // TODO - add Mips implementation
+    return false;
+  }
+  RegLocation rl_src_i = info->args[0];
+  RegLocation rl_dest = InlineTarget(info);  // result reg
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  if (size == kLong) {
+    RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg);
+    OpRegReg(kOpRev, rl_result.low_reg, rl_i.high_reg);
+    OpRegReg(kOpRev, rl_result.high_reg, rl_i.low_reg);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    DCHECK(size == kWord || size == kSignedHalf);
+    OpKind op = (size == kWord) ? kOpRev : kOpRevsh;
+    RegLocation rl_i = LoadValue(rl_src_i, kCoreReg);
+    OpRegReg(op, rl_result.low_reg, rl_i.low_reg);
+    StoreValue(rl_dest, rl_result);
+  }
+  return true;
+}
+
 bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
   if (cu_->instruction_set == kMips) {
     // TODO - add Mips implementation
@@ -1249,6 +1259,16 @@
     if (tgt_method == "float java.lang.Float.intBitsToFloat(int)") {
       return GenInlinedFloatCvt(info);
     }
+  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Integer;")) {
+    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
+    if (tgt_method == "int java.lang.Integer.reverseBytes(int)") {
+      return GenInlinedReverseBytes(info, kWord);
+    }
+  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Long;")) {
+    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
+    if (tgt_method == "long java.lang.Long.reverseBytes(long)") {
+      return GenInlinedReverseBytes(info, kLong);
+    }
   } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Math;") ||
              tgt_methods_declaring_class.starts_with("Ljava/lang/StrictMath;")) {
     std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
@@ -1272,6 +1292,11 @@
         tgt_method == "double java.lang.StrictMath.sqrt(double)") {
       return GenInlinedSqrt(info);
     }
+  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Short;")) {
+    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
+    if (tgt_method == "short java.lang.Short.reverseBytes(short)") {
+      return GenInlinedReverseBytes(info, kSignedHalf);
+    }
   } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/String;")) {
     std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
     if (tgt_method == "char java.lang.String.charAt(int)") {
@@ -1390,11 +1415,8 @@
                                               &vtable_idx,
                                               &direct_code, &direct_method) && !SLOW_INVOKE_PATH;
   if (info->type == kInterface) {
-    if (fast_path) {
-      p_null_ck = &null_ck;
-    }
     next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
-    skip_this = false;
+    skip_this = fast_path;
   } else if (info->type == kDirect) {
     if (fast_path) {
       p_null_ck = &null_ck;
@@ -1434,15 +1456,14 @@
   if (cu_->instruction_set != kX86) {
     call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
   } else {
-    if (fast_path && info->type != kInterface) {
+    if (fast_path) {
       call_inst = OpMem(kOpBlx, TargetReg(kArg0),
                         mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value());
     } else {
       ThreadOffset trampoline(-1);
       switch (info->type) {
       case kInterface:
-        trampoline = fast_path ? QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
-            : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
+        trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
         break;
       case kDirect:
         trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
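
The rewritten NextInterfaceCallInsn above replaces the trampoline bounce with a five-step fast path through the receiver's interface method table. The following standalone sketch uses simplified stand-in types, not ART classes, and a hypothetical name (InterfaceCallTarget); it only restates what the emitted sequence does at runtime:

    #include <array>
    #include <cstddef>
    #include <cstdint>

    // Toy model of the emitted IMT dispatch (states 0-5 above); not ART code.
    constexpr size_t kImtSize = 64;  // ClassLinker::kImtSize in this patch
    struct Method { const void* entry_point; };
    struct Class  { std::array<Method*, kImtSize> imtable; };
    struct Object { Class* klass; };

    const void* InterfaceCallTarget(Object* receiver, uint32_t hidden_dex_method_index) {
      // States 1-2: load "this" into kArg1, null-check it, then load this->klass_.
      Class* klass = receiver->klass;
      // States 3-4: load klass->imtable_ and index it with dex method index % kImtSize.
      Method* method = klass->imtable[hidden_dex_method_index % kImtSize];
      // State 5: branch to the method's compiled-code entry point. If the slot holds
      // the conflict stub (art_quick_imt_conflict_trampoline), that stub uses the
      // hidden index loaded in state 0 (kHiddenArg; xmm0 on x86) to fetch the real
      // target from the caller's dex_cache_resolved_methods_.
      return method->entry_point;
    }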
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 0ee32d4..9c598e6 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -76,6 +76,8 @@
     case kRet0: res = rMIPS_RET0; break;
     case kRet1: res = rMIPS_RET1; break;
     case kInvokeTgt: res = rMIPS_INVOKE_TGT; break;
+    case kHiddenArg: res = r_T0; break;
+    case kHiddenFpArg: res = INVALID_REG; break;
     case kCount: res = rMIPS_COUNT; break;
   }
   return res;
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index d629b44..7e9848d 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -544,6 +544,7 @@
 
     bool GenInlinedCharAt(CallInfo* info);
     bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
+    bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
     bool GenInlinedAbsInt(CallInfo* info);
     bool GenInlinedAbsLong(CallInfo* info);
     bool GenInlinedFloatCvt(CallInfo* info);
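
GenInlinedReverseBytes declared here is implemented in gen_invoke.cc earlier in this patch with kOpRev/kOpRevsh plus a pairwise swap for longs. A standalone restatement of the intended semantics, assuming GCC/Clang byte-swap builtins (plain C++, not ART code):

    #include <cstdint>

    // Integer.reverseBytes: kOpRev on the 32-bit value.
    uint32_t ReverseBytes32(uint32_t x) {
      return __builtin_bswap32(x);
    }

    // Short.reverseBytes: kOpRevsh, i.e. byte-swap the low 16 bits and sign-extend.
    int16_t ReverseBytes16(int16_t x) {
      return static_cast<int16_t>(__builtin_bswap16(static_cast<uint16_t>(x)));
    }

    // Long.reverseBytes: reverse each 32-bit half and swap the halves, matching
    // the two kOpRev operations on (low_reg, high_reg) in the kLong path above.
    uint64_t ReverseBytes64(uint64_t x) {
      const uint32_t lo = static_cast<uint32_t>(x);
      const uint32_t hi = static_cast<uint32_t>(x >> 32);
      return (static_cast<uint64_t>(__builtin_bswap32(lo)) << 32) | __builtin_bswap32(hi);
    }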
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 901ac9e..878fa76 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -85,6 +85,8 @@
     case kRet0: res = rX86_RET0; break;
     case kRet1: res = rX86_RET1; break;
     case kInvokeTgt: res = rX86_INVOKE_TGT; break;
+    case kHiddenArg: res = rAX; break;
+    case kHiddenFpArg: res = fr0; break;
     case kCount: res = rX86_COUNT; break;
   }
   return res;
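
The new kHiddenArg/kHiddenFpArg targets resolve to different physical registers per backend; the values come from the three TargetReg switches in this patch and the hidden-argument comments on the imt conflict trampolines later in the patch (r12 on ARM, $t0 on MIPS, eax with a copy in xmm0 on x86). A purely illustrative consolidation; this helper does not exist in ART, and each backend keeps its own switch:

    // Illustrative only: register constants are the backend names used above.
    int HiddenArgTargetReg(InstructionSet isa, bool fp_copy) {
      switch (isa) {
        case kThumb2: return fp_copy ? INVALID_REG : r12;   // ARM
        case kMips:   return fp_copy ? INVALID_REG : r_T0;  // MIPS ($t0)
        case kX86:    return fp_copy ? fr0 : rAX;           // x86: eax, mirrored
                                                            // into xmm0 before the call
        default:      return INVALID_REG;
      }
    }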
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 91b0188..053ea16 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -469,6 +469,11 @@
   return CreateTrampoline(instruction_set_, kJniAbi, JNI_ENTRYPOINT_OFFSET(pDlsymLookup));
 }
 
+const std::vector<uint8_t>* CompilerDriver::CreatePortableImtConflictTrampoline() const {
+  return CreateTrampoline(instruction_set_, kPortableAbi,
+                          PORTABLE_ENTRYPOINT_OFFSET(pPortableImtConflictTrampoline));
+}
+
 const std::vector<uint8_t>* CompilerDriver::CreatePortableResolutionTrampoline() const {
   return CreateTrampoline(instruction_set_, kPortableAbi,
                           PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampoline));
@@ -479,6 +484,11 @@
                           PORTABLE_ENTRYPOINT_OFFSET(pPortableToInterpreterBridge));
 }
 
+const std::vector<uint8_t>* CompilerDriver::CreateQuickImtConflictTrampoline() const {
+  return CreateTrampoline(instruction_set_, kQuickAbi,
+                          QUICK_ENTRYPOINT_OFFSET(pQuickImtConflictTrampoline));
+}
+
 const std::vector<uint8_t>* CompilerDriver::CreateQuickResolutionTrampoline() const {
   return CreateTrampoline(instruction_set_, kQuickAbi,
                           QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampoline));
@@ -1080,7 +1090,7 @@
     }
     use_dex_cache = true;
   } else {
-    if (sharp_type != kStatic && sharp_type != kDirect && sharp_type != kInterface) {
+    if (sharp_type != kStatic && sharp_type != kDirect) {
       return;
     }
     // TODO: support patching on all architectures.
@@ -1101,9 +1111,7 @@
     }
   }
   if (update_stats && method_code_in_boot) {
-    if (sharp_type != kInterface) {  // Interfaces always go via a trampoline until we get IMTs.
-      stats_->DirectCallsToBoot(*type);
-    }
+    stats_->DirectCallsToBoot(*type);
     stats_->DirectMethodsToBoot(*type);
   }
   if (!use_dex_cache && compiling_boot) {
@@ -1145,19 +1153,15 @@
     if (compiling_boot) {
       *type = sharp_type;
       *direct_method = -1;
-      if (sharp_type != kInterface) {
-        *direct_code = -1;
-      }
+      *direct_code = -1;
     } else {
       bool method_in_image =
           Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace();
       if (method_in_image) {
-        CHECK_EQ(method->IsAbstract(), sharp_type == kInterface);
+        CHECK(!method->IsAbstract());
         *type = sharp_type;
         *direct_method = reinterpret_cast<uintptr_t>(method);
-        if (*type != kInterface) {
-          *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode());
-        }
+        *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode());
         target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
         target_method->dex_method_index = method->GetDexMethodIndex();
       } else if (!must_use_direct_pointers) {
@@ -1187,6 +1191,8 @@
   if (resolved_method != NULL) {
     if (*invoke_type == kVirtual || *invoke_type == kSuper) {
       *vtable_idx = resolved_method->GetMethodIndex();
+    } else if (*invoke_type == kInterface) {
+      *vtable_idx = resolved_method->GetDexMethodIndex();
     }
     // Don't try to fast-path if we don't understand the caller's class or this appears to be an
     // Incompatible Class Change Error.
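
For interface invokes the resolved method's dex method index is now passed back through the existing *vtable_idx out-parameter, so the compiled call site (NextInterfaceCallInsn earlier in this patch) and the runtime lookup agree on how a slot is chosen. The computation is just a modulo; a tiny standalone restatement, assuming kImtSize = 64 from the class_linker.h change later in this patch:

    #include <cstdint>

    constexpr uint32_t kImtSize = 64;  // ClassLinker::kImtSize

    // Slot selection shared by the compiler and the runtime IMT lookup.
    constexpr uint32_t ImtSlot(uint32_t dex_method_index) {
      return dex_method_index % kImtSize;
    }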
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 971021f..c791753 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -130,10 +130,14 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreateJniDlsymLookup() const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const std::vector<uint8_t>* CreatePortableImtConflictTrampoline() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreatePortableResolutionTrampoline() const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreatePortableToInterpreterBridge() const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const std::vector<uint8_t>* CreateQuickImtConflictTrampoline() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreateQuickResolutionTrampoline() const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreateQuickToInterpreterBridge() const
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 871cfd5..684cb01 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -36,6 +36,7 @@
 #include "globals.h"
 #include "image.h"
 #include "intern_table.h"
+#include "lock_word.h"
 #include "mirror/art_field-inl.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/array-inl.h"
@@ -98,11 +99,15 @@
 
   jni_dlsym_lookup_offset_ = oat_file_->GetOatHeader().GetJniDlsymLookupOffset();
 
+  portable_imt_conflict_trampoline_offset_ =
+      oat_file_->GetOatHeader().GetPortableImtConflictTrampolineOffset();
   portable_resolution_trampoline_offset_ =
       oat_file_->GetOatHeader().GetPortableResolutionTrampolineOffset();
   portable_to_interpreter_bridge_offset_ =
       oat_file_->GetOatHeader().GetPortableToInterpreterBridgeOffset();
 
+  quick_imt_conflict_trampoline_offset_ =
+      oat_file_->GetOatHeader().GetQuickImtConflictTrampolineOffset();
   quick_resolution_trampoline_offset_ =
       oat_file_->GetOatHeader().GetQuickResolutionTrampolineOffset();
   quick_to_interpreter_bridge_offset_ =
@@ -390,6 +395,8 @@
                   ObjectArray<Object>::Alloc(self, object_array_class,
                                              ImageHeader::kImageRootsMax));
   image_roots->Set(ImageHeader::kResolutionMethod, runtime->GetResolutionMethod());
+  image_roots->Set(ImageHeader::kImtConflictMethod, runtime->GetImtConflictMethod());
+  image_roots->Set(ImageHeader::kDefaultImt, runtime->GetDefaultImt());
   image_roots->Set(ImageHeader::kCalleeSaveMethod,
                    runtime->GetCalleeSaveMethod(Runtime::kSaveAll));
   image_roots->Set(ImageHeader::kRefsOnlySaveMethod,
@@ -489,7 +496,30 @@
   DCHECK_LT(offset + n, image_writer->image_->Size());
   memcpy(dst, src, n);
   Object* copy = reinterpret_cast<Object*>(dst);
-  copy->SetField32(Object::MonitorOffset(), 0, false);  // We may have inflated the lock during compilation.
+  // Normalize the lock word: objects with inflated monitors get the monitor's hash code
+  // written into their lock word; objects that already store a hash code keep it.
+  LockWord lw(copy->GetLockWord());
+  switch (lw.GetState()) {
+    case LockWord::kFatLocked: {
+      Monitor* monitor = lw.FatLockMonitor();
+      CHECK(monitor != nullptr);
+      CHECK(!monitor->IsLocked());
+      copy->SetLockWord(LockWord::FromHashCode(monitor->GetHashCode()));
+      break;
+    }
+    case LockWord::kThinLocked: {
+      LOG(FATAL) << "Thin locked object " << obj << " found during object copy";
+      break;
+    }
+    case LockWord::kUnlocked:
+      // Fall-through.
+    case LockWord::kHashCode:
+      // Do nothing since we can just keep the same hash code.
+      break;
+    default:
+      LOG(FATAL) << "Unreachable.";
+      break;
+  }
   image_writer->FixupObject(obj, copy);
 }
 
@@ -527,6 +557,12 @@
 #else
     copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_));
 #endif
+  } else if (UNLIKELY(orig == Runtime::Current()->GetImtConflictMethod())) {
+#if defined(ART_USE_PORTABLE_COMPILER)
+    copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_imt_conflict_trampoline_offset_));
+#else
+    copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_imt_conflict_trampoline_offset_));
+#endif
   } else {
     // We assume all methods have code. If they don't currently then we set them to the use the
     // resolution trampoline. Abstract methods never have code and so we need to make sure their
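
The copy hook above now normalizes each object's lock word as it is written into the image: a fat (inflated) monitor is folded down to its hash code, a thin lock is a fatal error at this point, and unlocked or already-hashed words pass through unchanged, so no Monitor pointer ever lands in the image. A standalone restatement of that policy with toy types, not ART code:

    #include <cassert>
    #include <cstdint>

    // Toy model of the lock-word normalization rule above; not ART code.
    enum class State { kUnlocked, kThinLocked, kFatLocked, kHashCode };
    struct Word { State state; uint32_t payload; };  // payload: hash code or monitor id

    Word NormalizeForImage(Word w, uint32_t monitor_hash) {
      switch (w.state) {
        case State::kFatLocked:
          return {State::kHashCode, monitor_hash};   // keep only the hash
        case State::kThinLocked:
          assert(false && "thin locked object found during image copy");
          return w;
        case State::kUnlocked:
        case State::kHashCode:
          return w;                                  // already safe to persist
      }
      return w;
    }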
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 0d85f36..0b408e8 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -40,7 +40,8 @@
   explicit ImageWriter(const CompilerDriver& compiler_driver)
       : compiler_driver_(compiler_driver), oat_file_(NULL), image_end_(0), image_begin_(NULL),
         oat_data_begin_(NULL), interpreter_to_interpreter_bridge_offset_(0),
-        interpreter_to_compiled_code_bridge_offset_(0), portable_resolution_trampoline_offset_(0),
+        interpreter_to_compiled_code_bridge_offset_(0), portable_imt_conflict_trampoline_offset_(0),
+        portable_resolution_trampoline_offset_(0), quick_imt_conflict_trampoline_offset_(0),
         quick_resolution_trampoline_offset_(0) {}
 
   ~ImageWriter() {}
@@ -204,8 +205,10 @@
   uint32_t interpreter_to_interpreter_bridge_offset_;
   uint32_t interpreter_to_compiled_code_bridge_offset_;
   uint32_t jni_dlsym_lookup_offset_;
+  uint32_t portable_imt_conflict_trampoline_offset_;
   uint32_t portable_resolution_trampoline_offset_;
   uint32_t portable_to_interpreter_bridge_offset_;
+  uint32_t quick_imt_conflict_trampoline_offset_;
   uint32_t quick_resolution_trampoline_offset_;
   uint32_t quick_to_interpreter_bridge_offset_;
 
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index af86743..815bca5 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -149,7 +149,7 @@
 TEST_F(OatTest, OatHeaderSizeCheck) {
   // If this test is failing and you have to update these constants,
   // it is time to update OatHeader::kOatVersion
-  EXPECT_EQ(64U, sizeof(OatHeader));
+  EXPECT_EQ(72U, sizeof(OatHeader));
   EXPECT_EQ(28U, sizeof(OatMethodOffsets));
 }
 
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index f681d7d..28355bf 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -55,8 +55,10 @@
     size_interpreter_to_interpreter_bridge_(0),
     size_interpreter_to_compiled_code_bridge_(0),
     size_jni_dlsym_lookup_(0),
+    size_portable_imt_conflict_trampoline_(0),
     size_portable_resolution_trampoline_(0),
     size_portable_to_interpreter_bridge_(0),
+    size_quick_imt_conflict_trampoline_(0),
     size_quick_resolution_trampoline_(0),
     size_quick_to_interpreter_bridge_(0),
     size_trampoline_alignment_(0),
@@ -229,8 +231,10 @@
     DO_TRAMPOLINE(interpreter_to_interpreter_bridge_, InterpreterToInterpreterBridge);
     DO_TRAMPOLINE(interpreter_to_compiled_code_bridge_, InterpreterToCompiledCodeBridge);
     DO_TRAMPOLINE(jni_dlsym_lookup_, JniDlsymLookup);
+    DO_TRAMPOLINE(portable_imt_conflict_trampoline_, PortableImtConflictTrampoline);
     DO_TRAMPOLINE(portable_resolution_trampoline_, PortableResolutionTrampoline);
     DO_TRAMPOLINE(portable_to_interpreter_bridge_, PortableToInterpreterBridge);
+    DO_TRAMPOLINE(quick_imt_conflict_trampoline_, QuickImtConflictTrampoline);
     DO_TRAMPOLINE(quick_resolution_trampoline_, QuickResolutionTrampoline);
     DO_TRAMPOLINE(quick_to_interpreter_bridge_, QuickToInterpreterBridge);
 
@@ -239,8 +243,10 @@
     oat_header_->SetInterpreterToInterpreterBridgeOffset(0);
     oat_header_->SetInterpreterToCompiledCodeBridgeOffset(0);
     oat_header_->SetJniDlsymLookupOffset(0);
+    oat_header_->SetPortableImtConflictTrampolineOffset(0);
     oat_header_->SetPortableResolutionTrampolineOffset(0);
     oat_header_->SetPortableToInterpreterBridgeOffset(0);
+    oat_header_->SetQuickImtConflictTrampolineOffset(0);
     oat_header_->SetQuickResolutionTrampolineOffset(0);
     oat_header_->SetQuickToInterpreterBridgeOffset(0);
   }
@@ -519,8 +525,10 @@
     DO_STAT(size_interpreter_to_interpreter_bridge_);
     DO_STAT(size_interpreter_to_compiled_code_bridge_);
     DO_STAT(size_jni_dlsym_lookup_);
+    DO_STAT(size_portable_imt_conflict_trampoline_);
     DO_STAT(size_portable_resolution_trampoline_);
     DO_STAT(size_portable_to_interpreter_bridge_);
+    DO_STAT(size_quick_imt_conflict_trampoline_);
     DO_STAT(size_quick_resolution_trampoline_);
     DO_STAT(size_quick_to_interpreter_bridge_);
     DO_STAT(size_trampoline_alignment_);
@@ -616,8 +624,10 @@
     DO_TRAMPOLINE(interpreter_to_interpreter_bridge_);
     DO_TRAMPOLINE(interpreter_to_compiled_code_bridge_);
     DO_TRAMPOLINE(jni_dlsym_lookup_);
+    DO_TRAMPOLINE(portable_imt_conflict_trampoline_);
     DO_TRAMPOLINE(portable_resolution_trampoline_);
     DO_TRAMPOLINE(portable_to_interpreter_bridge_);
+    DO_TRAMPOLINE(quick_imt_conflict_trampoline_);
     DO_TRAMPOLINE(quick_resolution_trampoline_);
     DO_TRAMPOLINE(quick_to_interpreter_bridge_);
     #undef DO_TRAMPOLINE
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index e3cb0a8..5d947cf 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -226,8 +226,10 @@
   UniquePtr<const std::vector<uint8_t> > interpreter_to_interpreter_bridge_;
   UniquePtr<const std::vector<uint8_t> > interpreter_to_compiled_code_bridge_;
   UniquePtr<const std::vector<uint8_t> > jni_dlsym_lookup_;
+  UniquePtr<const std::vector<uint8_t> > portable_imt_conflict_trampoline_;
   UniquePtr<const std::vector<uint8_t> > portable_resolution_trampoline_;
   UniquePtr<const std::vector<uint8_t> > portable_to_interpreter_bridge_;
+  UniquePtr<const std::vector<uint8_t> > quick_imt_conflict_trampoline_;
   UniquePtr<const std::vector<uint8_t> > quick_resolution_trampoline_;
   UniquePtr<const std::vector<uint8_t> > quick_to_interpreter_bridge_;
 
@@ -240,8 +242,10 @@
   uint32_t size_interpreter_to_interpreter_bridge_;
   uint32_t size_interpreter_to_compiled_code_bridge_;
   uint32_t size_jni_dlsym_lookup_;
+  uint32_t size_portable_imt_conflict_trampoline_;
   uint32_t size_portable_resolution_trampoline_;
   uint32_t size_portable_to_interpreter_bridge_;
+  uint32_t size_quick_imt_conflict_trampoline_;
   uint32_t size_quick_resolution_trampoline_;
   uint32_t size_quick_to_interpreter_bridge_;
   uint32_t size_trampoline_alignment_;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index d8112ea..1beb862 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -345,10 +345,6 @@
       return false;
     }
     Runtime* runtime = Runtime::Current();
-    // if we loaded an existing image, we will reuse values from the image roots.
-    if (!runtime->HasResolutionMethod()) {
-      runtime->SetResolutionMethod(runtime->CreateResolutionMethod());
-    }
     for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
       Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
       if (!runtime->HasCalleeSaveMethod(type)) {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index fdeeaec..3a32ff1 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -86,6 +86,8 @@
 
 const char* image_roots_descriptions_[] = {
   "kResolutionMethod",
+  "kImtConflictMethod",
+  "kDefaultImt",
   "kCalleeSaveMethod",
   "kRefsOnlySaveMethod",
   "kRefsAndArgsSaveMethod",
@@ -1005,7 +1007,8 @@
           indent_os << StringPrintf("OAT CODE: %p\n", oat_code);
         }
       } else if (method->IsAbstract() || method->IsCalleeSaveMethod() ||
-          method->IsResolutionMethod() || MethodHelper(method).IsClassInitializer()) {
+          method->IsResolutionMethod() || method->IsImtConflictMethod() ||
+          MethodHelper(method).IsClassInitializer()) {
         DCHECK(method->GetNativeGcMap() == NULL) << PrettyMethod(method);
         DCHECK(method->GetMappingTable() == NULL) << PrettyMethod(method);
       } else {
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 352982f..3dac636 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -121,10 +121,10 @@
 extern "C" int32_t art_quick_string_compareto(void*, void*);
 
 // Invoke entrypoints.
+extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
 extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
 extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
 extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
 extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
 extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
@@ -253,10 +253,10 @@
   qpoints->pMemcpy = memcpy;
 
   // Invocation
+  qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
   qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
   qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
   qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
-  qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
   qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
   qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
   qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 736ce2f..50a5176 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -325,24 +325,25 @@
 ENTRY art_quick_lock_object
     cbz    r0, slow_lock
 retry_lock:
-    ldrex  r1, [r0, #LOCK_WORD_OFFSET]
     ldrt   r2, [r9, #THREAD_ID_OFFSET]
-    cmp    r1, #0
-    bmi    slow_lock                  @ lock word contains a monitor
-    bne    already_thin
+    ldrex  r1, [r0, #LOCK_WORD_OFFSET]
+    cbnz   r1, not_unlocked           @ already thin locked
     @ unlocked case - r2 holds thread id with count of 0
     strex  r3, r2, [r0, #LOCK_WORD_OFFSET]
     cbnz   r3, strex_fail             @ store failed, retry
     bx lr
 strex_fail:
     b retry_lock                      @ unlikely forward branch, need to reload and recheck r1/r2
-already_thin:
+not_unlocked:
+    lsr    r3, r1, 30
+    cbnz   r3, slow_lock              @ if either of the top two bits are set, go slow path
     eor    r2, r1, r2                 @ lock_word.ThreadId() ^ self->ThreadId()
     uxth   r2, r2                     @ zero top 16 bits
     cbnz   r2, slow_lock              @ lock word and self thread id's match -> recursive lock
                                       @ else contention, go to slow path
-    adds   r2, r1, #65536             @ increment count in lock word placing in r2 for storing
-    bmi    slow_lock                  @ if we overflow the count go slow
+    add    r2, r1, #65536             @ increment count in lock word placing in r2 for storing
+    lsr    r1, r2, 30                 @ if either of the top two bits are set, we overflowed.
+    cbnz   r1, slow_lock              @ if we overflow the count go slow path
     str    r2, [r0, #LOCK_WORD_OFFSET] @ no need for strex as we hold the lock
     bx lr
 slow_lock:
@@ -363,9 +364,9 @@
 ENTRY art_quick_unlock_object
     cbz    r0, slow_unlock
     ldr    r1, [r0, #LOCK_WORD_OFFSET]
+    lsr    r2, r1, 30
+    cbnz   r2, slow_unlock            @ if either of the top two bits are set, go slow path
     ldr    r2, [r9, #THREAD_ID_OFFSET]
-    cmp    r1, #0
-    bmi    slow_unlock                @ lock word contains a monitor
     eor    r3, r1, r2                 @ lock_word.ThreadId() ^ self->ThreadId()
     uxth   r3, r3                     @ zero top 16 bits
     cbnz   r3, slow_unlock            @ do lock word and self thread id's match?
@@ -1040,6 +1041,18 @@
     DELIVER_PENDING_EXCEPTION
 END art_quick_proxy_invoke_handler
 
+    /*
+     * Called to resolve an imt conflict. r12 is a hidden argument that holds the target method's
+     * dex method index.
+     */
+ENTRY art_quick_imt_conflict_trampoline
+    ldr    r0, [sp, #0]            @ load caller Method*
+    ldr    r0, [r0, #METHOD_DEX_CACHE_METHODS_OFFSET]  @ load dex_cache_resolved_methods
+    add    r0, #OBJECT_ARRAY_DATA_OFFSET  @ get starting address of data
+    ldr    r0, [r0, r12, lsl 2]    @ load the target method
+    b art_quick_invoke_interface_trampoline
+END art_quick_imt_conflict_trampoline
+
     .extern artQuickResolutionTrampoline
 ENTRY art_quick_resolution_trampoline
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index cc975d75..331a461 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -122,10 +122,10 @@
 extern "C" int32_t art_quick_string_compareto(void*, void*);
 
 // Invoke entrypoints.
+extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
 extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
 extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
 extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
 extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
 extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
@@ -253,10 +253,10 @@
   qpoints->pMemcpy = memcpy;
 
   // Invocation
+  qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
   qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
   qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
   qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
-  qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
   qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
   qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
   qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 031d13a..451b1bb 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1060,6 +1060,21 @@
     DELIVER_PENDING_EXCEPTION
 END art_quick_proxy_invoke_handler
 
+    /*
+     * Called to resolve an imt conflict. t0 is a hidden argument that holds the target method's
+     * dex method index.
+     */
+ENTRY art_quick_imt_conflict_trampoline
+    GENERATE_GLOBAL_POINTER
+    lw      $a0, 0($sp)            # load caller Method*
+    lw      $a0, METHOD_DEX_CACHE_METHODS_OFFSET($a0)  # load dex_cache_resolved_methods
+    sll     $t0, 2                 # convert target method offset to bytes
+    add     $a0, $t0               # get address of target method
+    lw      $a0, OBJECT_ARRAY_DATA_OFFSET($a0)  # load the target method
+    la      $t9, art_quick_invoke_interface_trampoline
+    jr      $t9
+END art_quick_imt_conflict_trampoline
+
     .extern artQuickResolutionTrampoline
 ENTRY art_quick_resolution_trampoline
     GENERATE_GLOBAL_POINTER
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 89dd1b8..99b0dd5 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -104,10 +104,10 @@
 extern "C" void* art_quick_memcpy(void*, const void*, size_t);
 
 // Invoke entrypoints.
+extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
 extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
 extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
 extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
 extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
 extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
@@ -235,10 +235,10 @@
   qpoints->pMemcpy = art_quick_memcpy;
 
   // Invocation
+  qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
   qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
   qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
   qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
-  qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
   qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
   qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
   qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 805f6f4..6fe4993 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -411,9 +411,10 @@
     jz   slow_lock
 retry_lock:
     movl LOCK_WORD_OFFSET(%eax), %ecx     // ecx := lock word
+    test LITERAL(0xC0000000), %ecx        // test the 2 high bits.
+    jne  slow_lock                        // slow path if either of the two high bits are set.
     movl %fs:THREAD_ID_OFFSET, %edx       // edx := thread id
     test %ecx, %ecx
-    jb   slow_lock                        // lock word contains a monitor
     jnz  already_thin                     // lock word contains a thin lock
     // unlocked case - %edx holds thread id with count of 0
     movl %eax, %ecx                       // remember object in case of retry
@@ -428,7 +429,8 @@
     cmpw %ax, %dx                         // do we hold the lock already?
     jne  slow_lock
     addl LITERAL(65536), %eax             // increment recursion count
-    jb   slow_lock                        // count overflowed so go slow
+    test LITERAL(0xC0000000), %eax        // overflowed if either of top two bits are set
+    jne  slow_lock                        // count overflowed so go slow
     movl %eax, LOCK_WORD_OFFSET(%ecx)     // update lockword, cmpxchg not necessary as we hold lock
     ret
 slow_lock:
@@ -997,6 +999,20 @@
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
 END_FUNCTION art_quick_proxy_invoke_handler
 
+    /*
+     * Called to resolve an imt conflict. xmm0 is a hidden argument that holds the target method's
+     * dex method index.
+     */
+DEFINE_FUNCTION art_quick_imt_conflict_trampoline
+    PUSH ecx
+    movl 8(%esp), %eax            // load caller Method*
+    movl METHOD_DEX_CACHE_METHODS_OFFSET(%eax), %eax  // load dex_cache_resolved_methods
+    movd %xmm0, %ecx              // get target method index stored in xmm0
+    movl OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4), %eax  // load the target method
+    POP ecx
+    jmp art_quick_invoke_interface_trampoline
+END_FUNCTION art_quick_imt_conflict_trampoline
+
 DEFINE_FUNCTION art_quick_resolution_trampoline
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
     PUSH esp                      // pass SP
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index a6700bc..e9bbf91 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -38,7 +38,8 @@
 #define STRING_OFFSET_OFFSET 20
 #define STRING_DATA_OFFSET 12
 
-// Offset of field Method::entry_point_from_compiled_code_
+// Offsets within java.lang.Method.
+#define METHOD_DEX_CACHE_METHODS_OFFSET 16
 #define METHOD_CODE_OFFSET 40
 
 #endif  // ART_RUNTIME_ASM_SUPPORT_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 03f2c9d..2fc564f 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -196,7 +196,9 @@
       class_table_dirty_(false),
       intern_table_(intern_table),
       portable_resolution_trampoline_(NULL),
-      quick_resolution_trampoline_(NULL) {
+      quick_resolution_trampoline_(NULL),
+      portable_imt_conflict_trampoline_(NULL),
+      quick_imt_conflict_trampoline_(NULL) {
   CHECK_EQ(arraysize(class_roots_descriptors_), size_t(kClassRootsMax));
 }
 
@@ -336,6 +338,12 @@
   InitializePrimitiveClass(char_class.get(), Primitive::kPrimChar);
   SetClassRoot(kPrimitiveChar, char_class.get());  // needs descriptor
 
+  // Create runtime resolution and imt conflict methods. Also set up the default imt.
+  Runtime* runtime = Runtime::Current();
+  runtime->SetResolutionMethod(runtime->CreateResolutionMethod());
+  runtime->SetImtConflictMethod(runtime->CreateImtConflictMethod());
+  runtime->SetDefaultImt(runtime->CreateDefaultImt(this));
+
   // Object, String and DexCache need to be rerun through FindSystemClass to finish init
   java_lang_Object->SetStatus(mirror::Class::kStatusNotReady, self);
   mirror::Class* Object_class = FindSystemClass("Ljava/lang/Object;");
@@ -1045,6 +1053,8 @@
   CHECK(oat_file.GetOatHeader().GetImageFileLocation().empty());
   portable_resolution_trampoline_ = oat_file.GetOatHeader().GetPortableResolutionTrampoline();
   quick_resolution_trampoline_ = oat_file.GetOatHeader().GetQuickResolutionTrampoline();
+  portable_imt_conflict_trampoline_ = oat_file.GetOatHeader().GetPortableImtConflictTrampoline();
+  quick_imt_conflict_trampoline_ = oat_file.GetOatHeader().GetQuickImtConflictTrampoline();
   mirror::Object* dex_caches_object = space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
   mirror::ObjectArray<mirror::DexCache>* dex_caches =
       dex_caches_object->AsObjectArray<mirror::DexCache>();
@@ -3518,6 +3528,8 @@
 
 bool ClassLinker::LinkInterfaceMethods(SirtRef<mirror::Class>& klass,
                                        mirror::ObjectArray<mirror::Class>* interfaces) {
+  // Set the imt table to be all conflicts by default.
+  klass->SetImTable(Runtime::Current()->GetDefaultImt());
   size_t super_ifcount;
   if (klass->HasSuperClass()) {
     super_ifcount = klass->GetSuperClass()->GetIfTableCount();
@@ -3625,6 +3637,13 @@
   if (klass->IsInterface()) {
     return true;
   }
+  // Allocate imtable
+  bool imtable_changed = false;
+  SirtRef<mirror::ObjectArray<mirror::ArtMethod> > imtable(self, AllocArtMethodArray(self, kImtSize));
+  if (UNLIKELY(imtable.get() == NULL)) {
+    CHECK(self->IsExceptionPending());  // OOME.
+    return false;
+  }
   std::vector<mirror::ArtMethod*> miranda_list;
   MethodHelper vtable_mh(NULL, this);
   MethodHelper interface_mh(NULL, this);
@@ -3664,6 +3683,14 @@
               return false;
             }
             method_array->Set(j, vtable_method);
+            // Place method in imt if entry is empty, place conflict otherwise.
+            uint32_t imt_index = interface_method->GetDexMethodIndex() % kImtSize;
+            if (imtable->Get(imt_index) == NULL) {
+              imtable->Set(imt_index, vtable_method);
+              imtable_changed = true;
+            } else {
+              imtable->Set(imt_index, Runtime::Current()->GetImtConflictMethod());
+            }
             break;
           }
         }
@@ -3695,6 +3722,16 @@
       }
     }
   }
+  if (imtable_changed) {
+    // Fill in empty entries in interface method table with conflict.
+    mirror::ArtMethod* imt_conflict_method = Runtime::Current()->GetImtConflictMethod();
+    for (size_t i = 0; i < kImtSize; i++) {
+      if (imtable->Get(i) == NULL) {
+        imtable->Set(i, imt_conflict_method);
+      }
+    }
+    klass->SetImTable(imtable.get());
+  }
   if (!miranda_list.empty()) {
     int old_method_count = klass->NumVirtualMethods();
     int new_method_count = old_method_count + miranda_list.size();
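
LinkInterfaceMethods above fills a per-class IMT as it matches interface methods to vtable entries: the slot is chosen by the interface method's dex method index, a free slot takes the class's implementing method, an occupied slot is downgraded to the conflict method, and a class whose table stays empty keeps the shared default IMT (all conflicts). A standalone sketch of that policy with simplified stand-in types; the real code stores the vtable method, not the interface method itself:

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Simplified stand-ins, not ART types.
    struct Method { uint32_t dex_method_index; };
    constexpr size_t kImtSize = 64;            // ClassLinker::kImtSize
    static Method imt_conflict_method{0};      // stand-in for Runtime::GetImtConflictMethod()

    std::array<Method*, kImtSize> BuildImt(const std::vector<Method*>& iface_methods) {
      std::array<Method*, kImtSize> imt{};     // all slots start empty
      for (Method* m : iface_methods) {
        const size_t slot = m->dex_method_index % kImtSize;
        // Free slot: install the implementation. Occupied slot: mark a conflict.
        imt[slot] = (imt[slot] == nullptr) ? m : &imt_conflict_method;
      }
      // Remaining empty slots also get the conflict method, matching the
      // "fill in empty entries" loop above.
      for (Method*& entry : imt) {
        if (entry == nullptr) {
          entry = &imt_conflict_method;
        }
      }
      return imt;
    }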
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 0bc1b5f..473370d9 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -51,6 +51,11 @@
 
 class ClassLinker {
  public:
+  // Interface method table size. Increasing this value reduces the chance of two interface methods
+  // colliding in the interface method table but increases the size of classes that implement
+  // (non-marker) interfaces.
+  static constexpr size_t kImtSize = 64;
+
   // Creates the class linker by bootstrapping from dex files.
   static ClassLinker* CreateFromCompiler(const std::vector<const DexFile*>& boot_class_path,
                                          InternTable* intern_table)
@@ -340,6 +345,18 @@
     return quick_resolution_trampoline_;
   }
 
+  const void* GetPortableImtConflictTrampoline() const {
+    return portable_imt_conflict_trampoline_;
+  }
+
+  const void* GetQuickImtConflictTrampoline() const {
+    return quick_imt_conflict_trampoline_;
+  }
+
+  InternTable* GetInternTable() const {
+    return intern_table_;
+  }
+
   // Attempts to insert a class into a class table.  Returns NULL if
   // the class was inserted, otherwise returns an existing class with
   // the same descriptor and ClassLoader.
@@ -608,6 +625,8 @@
 
   const void* portable_resolution_trampoline_;
   const void* quick_resolution_trampoline_;
+  const void* portable_imt_conflict_trampoline_;
+  const void* quick_imt_conflict_trampoline_;
 
   friend class ImageWriter;  // for GetClassRoots
   FRIEND_TEST(ClassLinkerTest, ClassRootDescriptors);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 029b73e..a52b680 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -497,6 +497,7 @@
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, direct_methods_),                "directMethods"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, ifields_),                       "iFields"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, iftable_),                       "ifTable"));
+    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, imtable_),                       "imTable"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, name_),                          "name"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, sfields_),                       "sFields"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, super_class_),                   "superClass"));
@@ -582,11 +583,11 @@
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StringClass, ASCII_),                  "ASCII"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StringClass, CASE_INSENSITIVE_ORDER_), "CASE_INSENSITIVE_ORDER"));
 
-    // alphabetical 64-bit
-    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StringClass, serialVersionUID_),       "serialVersionUID"));
-
     // alphabetical 32-bit
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StringClass, REPLACEMENT_CHAR_),       "REPLACEMENT_CHAR"));
+
+    // alphabetical 64-bit
+    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StringClass, serialVersionUID_),       "serialVersionUID"));
   };
 };
 
diff --git a/runtime/common_test.h b/runtime/common_test.h
index 899eab1..673a03b 100644
--- a/runtime/common_test.h
+++ b/runtime/common_test.h
@@ -329,9 +329,6 @@
       CompilerBackend compiler_backend = kQuick;
 #endif
 
-      if (!runtime_->HasResolutionMethod()) {
-        runtime_->SetResolutionMethod(runtime_->CreateResolutionMethod());
-      }
       for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
         Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
         if (!runtime_->HasCalleeSaveMethod(type)) {
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 2008604..7ce50c5 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -372,14 +372,21 @@
       return vtable->GetWithoutChecks(vtable_index);
     }
     case kInterface: {
-      mirror::ArtMethod* interface_method =
-          this_object->GetClass()->FindVirtualMethodForInterface(resolved_method);
-      if (UNLIKELY(interface_method == nullptr)) {
-        ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object,
-                                                                   referrer);
-        return nullptr;  // Failure.
+      uint32_t imt_index = resolved_method->GetDexMethodIndex() % ClassLinker::kImtSize;
+      mirror::ObjectArray<mirror::ArtMethod>* imt_table = this_object->GetClass()->GetImTable();
+      mirror::ArtMethod* imt_method = imt_table->Get(imt_index);
+      if (!imt_method->IsImtConflictMethod()) {
+        return imt_method;
       } else {
-        return interface_method;
+        mirror::ArtMethod* interface_method =
+            this_object->GetClass()->FindVirtualMethodForInterface(resolved_method);
+        if (UNLIKELY(interface_method == nullptr)) {
+          ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object,
+                                                                     referrer);
+          return nullptr;  // Failure.
+        } else {
+          return interface_method;
+        }
       }
     }
     default:
@@ -665,6 +672,23 @@
 #endif
 }
 
+static inline const void* GetPortableImtConflictTrampoline(ClassLinker* class_linker) {
+  return class_linker->GetPortableImtConflictTrampoline();
+}
+
+static inline const void* GetQuickImtConflictTrampoline(ClassLinker* class_linker) {
+  return class_linker->GetQuickImtConflictTrampoline();
+}
+
+// Return address of imt conflict trampoline stub for defined compiler.
+static inline const void* GetImtConflictTrampoline(ClassLinker* class_linker) {
+#if defined(ART_USE_PORTABLE_COMPILER)
+  return GetPortableImtConflictTrampoline(class_linker);
+#else
+  return GetQuickImtConflictTrampoline(class_linker);
+#endif
+}
+
 extern "C" void art_portable_proxy_invoke_handler();
 static inline const void* GetPortableProxyInvokeHandler() {
   return reinterpret_cast<void*>(art_portable_proxy_invoke_handler);
diff --git a/runtime/entrypoints/portable/portable_entrypoints.h b/runtime/entrypoints/portable/portable_entrypoints.h
index d456447..dbea707 100644
--- a/runtime/entrypoints/portable/portable_entrypoints.h
+++ b/runtime/entrypoints/portable/portable_entrypoints.h
@@ -35,6 +35,7 @@
 // compiler ABI.
 struct PACKED(4) PortableEntryPoints {
   // Invocation
+  void (*pPortableImtConflictTrampoline)(mirror::ArtMethod*);
   void (*pPortableResolutionTrampoline)(mirror::ArtMethod*);
   void (*pPortableToInterpreterBridge)(mirror::ArtMethod*);
 };
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index c8a85a0..1ba2066 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -118,10 +118,10 @@
   void* (*pMemcpy)(void*, const void*, size_t);
 
   // Invocation
+  void (*pQuickImtConflictTrampoline)(mirror::ArtMethod*);
   void (*pQuickResolutionTrampoline)(mirror::ArtMethod*);
   void (*pQuickToInterpreterBridge)(mirror::ArtMethod*);
   void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*);
-  void (*pInvokeInterfaceTrampoline)(uint32_t, void*);
   void (*pInvokeInterfaceTrampolineWithAccessCheck)(uint32_t, void*);
   void (*pInvokeStaticTrampolineWithAccessCheck)(uint32_t, void*);
   void (*pInvokeSuperTrampolineWithAccessCheck)(uint32_t, void*);
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 19df2da..7e05136 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -69,7 +69,7 @@
   virtual bool HandleDirtyObjectsPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
   virtual void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   virtual void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  virtual void FinishPhase();
+  virtual void FinishPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   virtual void MarkReachableObjects()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -208,13 +208,13 @@
   void SetImmuneRange(mirror::Object* begin, mirror::Object* end);
 
   void SweepSystemWeaks()
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   void VerifySystemWeaks()
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   // Verify that an object is live, either in a live bitmap or in the allocation stack.
   void VerifyIsLive(const mirror::Object* obj)
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 1c2b7ef..7d2441b 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -518,8 +518,9 @@
 
   void PreGcVerification(collector::GarbageCollector* gc);
   void PreSweepingGcVerification(collector::GarbageCollector* gc)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void PostGcVerification(collector::GarbageCollector* gc)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void PostGcVerification(collector::GarbageCollector* gc);
 
   // Update the watermark for the native allocated bytes based on the current number of native
   // bytes allocated and the target utilization ratio.
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 8c13d79..9ebc16a 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -287,8 +287,6 @@
   size_t size = RoundUp(Size(), kPageSize);
   // Trim the heap so that we minimize the size of the Zygote space.
   Trim();
-  // Trim our mem-map to free unused pages.
-  GetMemMap()->UnMapAtEnd(end_);
   // TODO: Not hardcode these in?
   const size_t starting_size = kPageSize;
   const size_t initial_size = 2 * MB;
@@ -308,9 +306,10 @@
   VLOG(heap) << "Size " << GetMemMap()->Size();
   VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
   VLOG(heap) << "Capacity " << PrettySize(capacity);
+  // Remap the tail.
   std::string error_msg;
-  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(alloc_space_name, End(), capacity,
-                                                 PROT_READ | PROT_WRITE, &error_msg));
+  UniquePtr<MemMap> mem_map(GetMemMap()->RemapAtEnd(end_, alloc_space_name,
+                                                    PROT_READ | PROT_WRITE, &error_msg));
   CHECK(mem_map.get() != nullptr) << error_msg;
   void* mspace = CreateMallocSpace(end_, starting_size, initial_size);
   // Protect memory beyond the initial size.
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index fa28642..e12ee06 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -241,6 +241,10 @@
   Runtime* runtime = Runtime::Current();
   mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
   runtime->SetResolutionMethod(down_cast<mirror::ArtMethod*>(resolution_method));
+  mirror::Object* imt_conflict_method = image_header.GetImageRoot(ImageHeader::kImtConflictMethod);
+  runtime->SetImtConflictMethod(down_cast<mirror::ArtMethod*>(imt_conflict_method));
+  mirror::Object* default_imt = image_header.GetImageRoot(ImageHeader::kDefaultImt);
+  runtime->SetDefaultImt(down_cast<mirror::ObjectArray<mirror::ArtMethod>*>(default_imt));
 
   mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
   runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), Runtime::kSaveAll);
diff --git a/runtime/image.h b/runtime/image.h
index 2cb468f..246f106 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -90,6 +90,8 @@
 
   enum ImageRoot {
     kResolutionMethod,
+    kImtConflictMethod,
+    kDefaultImt,
     kCalleeSaveMethod,
     kRefsOnlySaveMethod,
     kRefsAndArgsSaveMethod,
diff --git a/runtime/lock_word-inl.h b/runtime/lock_word-inl.h
index 30bf9bb..59947f5 100644
--- a/runtime/lock_word-inl.h
+++ b/runtime/lock_word-inl.h
@@ -33,7 +33,7 @@
 
 inline Monitor* LockWord::FatLockMonitor() const {
   DCHECK_EQ(GetState(), kFatLocked);
-  return reinterpret_cast<Monitor*>(value_ << 1);
+  return reinterpret_cast<Monitor*>(value_ << kStateSize);
 }
 
 inline LockWord::LockWord() : value_(0) {
@@ -41,10 +41,15 @@
 }
 
 inline LockWord::LockWord(Monitor* mon)
-    : value_((reinterpret_cast<uint32_t>(mon) >> 1) | (kStateFat << kStateShift)) {
+    : value_((reinterpret_cast<uint32_t>(mon) >> kStateSize) | (kStateFat << kStateShift)) {
   DCHECK_EQ(FatLockMonitor(), mon);
 }
 
+inline uint32_t LockWord::GetHashCode() const {
+  DCHECK_EQ(GetState(), kHashCode);
+  return (value_ >> kHashShift) & kHashMask;
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_LOCK_WORD_INL_H_
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index cd4bfbb..9b6c64a 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -29,30 +29,37 @@
 
 class Monitor;
 
-/* The lock value itself as stored in mirror::Object::monitor_.  The MSB of the lock encodes its
- * state.  When cleared, the lock is in the "thin" state and its bits are formatted as follows:
+/* The lock value itself as stored in mirror::Object::monitor_.  The two most significant bits
+ * encode the state. The three possible states are fat locked, thin/unlocked, and hash code.
+ * When the lock word is in the "thin" state, its bits are formatted as follows:
  *
- *  |3|32222222222111|11111110000000000|
- *  |1|09876543210987|65432109876543210|
- *  |0| lock count   | thread id       |
+ *  |33|22222222221111|1111110000000000|
+ *  |10|98765432109876|5432109876543210|
+ *  |00| lock count   |thread id owner |
  *
- * When set, the lock is in the "fat" state and its bits are formatted as follows:
+ * When the lock word is in the "fat" state, its bits are formatted as follows:
  *
- *  |3|3222222222211111111110000000000|
- *  |1|0987654321098765432109876543210|
- *  |1| Monitor* >> 1                 |
+ *  |33|222222222211111111110000000000|
+ *  |10|987654321098765432109876543210|
+ *  |01| Monitor* >> kStateSize       |
+ *
+ * When the lock word is in the hash state, its bits are formatted as follows:
+ *
+ *  |33|222222222211111111110000000000|
+ *  |10|987654321098765432109876543210|
+ *  |10| HashCode                     |
  */
 class LockWord {
  public:
   enum {
-    // Number of bits to encode the state, currently just fat or thin/unlocked.
-    kStateSize = 1,
+    // Number of bits to encode the state: currently fat, thin/unlocked, or hash code.
+    kStateSize = 2,
     // Number of bits to encode the thin lock owner.
     kThinLockOwnerSize = 16,
     // Remaining bits are the recursive lock count.
     kThinLockCountSize = 32 - kThinLockOwnerSize - kStateSize,
-
     // Thin lock bits. Owner in lowest bits.
+
     kThinLockOwnerShift = 0,
     kThinLockOwnerMask = (1 << kThinLockOwnerSize) - 1,
     // Count in higher bits.
@@ -65,25 +72,42 @@
     kStateMask = (1 << kStateSize) - 1,
     kStateThinOrUnlocked = 0,
     kStateFat = 1,
+    kStateHash = 2,
+
+    // When the state is kHashCode, the non-state bits hold the hashcode.
+    kHashShift = 0,
+    kHashSize = 32 - kStateSize,
+    kHashMask = (1 << kHashSize) - 1,
   };
 
   static LockWord FromThinLockId(uint32_t thread_id, uint32_t count) {
     CHECK_LE(thread_id, static_cast<uint32_t>(kThinLockOwnerMask));
-    return LockWord((thread_id << kThinLockOwnerShift) | (count << kThinLockCountShift));
+    return LockWord((thread_id << kThinLockOwnerShift) | (count << kThinLockCountShift) |
+                     (kStateThinOrUnlocked << kStateShift));
+  }
+
+  static LockWord FromHashCode(uint32_t hash_code) {
+    CHECK_LE(hash_code, static_cast<uint32_t>(kHashMask));
+    return LockWord((hash_code << kHashShift) | (kStateHash << kStateShift));
   }
 
   enum LockState {
     kUnlocked,    // No lock owners.
     kThinLocked,  // Single uncontended owner.
-    kFatLocked    // See associated monitor.
+    kFatLocked,   // See associated monitor.
+    kHashCode,    // Lock word contains an identity hash.
   };
 
   LockState GetState() const {
+    uint32_t internal_state = (value_ >> kStateShift) & kStateMask;
     if (value_ == 0) {
       return kUnlocked;
-    } else if (((value_ >> kStateShift) & kStateMask) == kStateThinOrUnlocked) {
+    } else if (internal_state == kStateThinOrUnlocked) {
       return kThinLocked;
+    } else if (internal_state == kStateHash) {
+      return kHashCode;
     } else {
+      DCHECK_EQ(internal_state, static_cast<uint32_t>(kStateFat));
       return kFatLocked;
     }
   }
@@ -103,17 +127,20 @@
   // Construct a lock word for inflation to use a Monitor.
   explicit LockWord(Monitor* mon);
 
-  bool operator==(const LockWord& rhs) {
+  bool operator==(const LockWord& rhs) const {
     return GetValue() == rhs.GetValue();
   }
 
- private:
-  explicit LockWord(uint32_t val) : value_(val) {}
+  // Return the hash code stored in the lock word; the state must be kHashCode.
+  uint32_t GetHashCode() const;
 
   uint32_t GetValue() const {
     return value_;
   }
 
+ private:
+  explicit LockWord(uint32_t val) : value_(val) {}
+
   // Only Object should be converting LockWords to/from uints.
   friend class mirror::Object;
 
diff --git a/runtime/locks.h b/runtime/locks.h
index f63e2b1..2262218 100644
--- a/runtime/locks.h
+++ b/runtime/locks.h
@@ -53,8 +53,8 @@
   kJdwpAttachLock,
   kJdwpStartLock,
   kRuntimeShutdownLock,
-  kHeapBitmapLock,
   kMonitorLock,
+  kHeapBitmapLock,
   kMutatorLock,
   kZygoteCreationLock,
 
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 00316f7..70d3457 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -170,12 +170,73 @@
   }
 };
 
-void MemMap::UnMapAtEnd(byte* new_end) {
+MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
+                           std::string* error_msg) {
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
-  size_t unmap_size = End() - new_end;
-  munmap(new_end, unmap_size);
-  size_ -= unmap_size;
+  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
+  DCHECK(IsAligned<kPageSize>(begin_));
+  DCHECK(IsAligned<kPageSize>(base_begin_));
+  DCHECK(IsAligned<kPageSize>(reinterpret_cast<byte*>(base_begin_) + base_size_));
+  DCHECK(IsAligned<kPageSize>(new_end));
+  byte* old_end = begin_ + size_;
+  byte* old_base_end = reinterpret_cast<byte*>(base_begin_) + base_size_;
+  byte* new_base_end = new_end;
+  DCHECK_LE(new_base_end, old_base_end);
+  if (new_base_end == old_base_end) {
+    return new MemMap(tail_name, NULL, 0, NULL, 0, tail_prot);
+  }
+  size_ = new_end - reinterpret_cast<byte*>(begin_);
+  base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
+  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
+  size_t tail_size = old_end - new_end;
+  byte* tail_base_begin = new_base_end;
+  size_t tail_base_size = old_base_end - new_base_end;
+  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
+  DCHECK(IsAligned<kPageSize>(tail_base_size));
+
+#ifdef USE_ASHMEM
+  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
+  // prefixed "dalvik-".
+  std::string debug_friendly_name("dalvik-");
+  debug_friendly_name += tail_name;
+  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
+  int flags = MAP_PRIVATE;
+  if (fd.get() == -1) {
+    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
+                              tail_name, strerror(errno));
+    return nullptr;
+  }
+#else
+  ScopedFd fd(-1);
+  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+#endif
+
+  // Unmap/map the tail region.
+  int result = munmap(tail_base_begin, tail_base_size);
+  if (result == -1) {
+    std::string maps;
+    ReadFileToString("/proc/self/maps", &maps);
+    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'\n%s",
+                              tail_base_begin, tail_base_size, name_.c_str(),
+                              maps.c_str());
+    return nullptr;
+  }
+  // Don't cause memory allocation between the munmap and the mmap
+  // calls. Otherwise, libc (or something else) might take this memory
+  // region. Note this isn't perfect as there's no way to prevent
+  // other threads from trying to take this memory region here.
+  byte* actual = reinterpret_cast<byte*>(mmap(tail_base_begin, tail_base_size, tail_prot,
+                                              flags, fd.get(), 0));
+  if (actual == MAP_FAILED) {
+    std::string maps;
+    ReadFileToString("/proc/self/maps", &maps);
+    *error_msg = StringPrintf("anonymous mmap(%p, %zd, %x, %x, %d, 0) failed\n%s",
+                              tail_base_begin, tail_base_size, tail_prot, flags, fd.get(),
+                              maps.c_str());
+    return nullptr;
+  }
+  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
 }
 
 bool MemMap::Protect(int prot) {
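
As a rough illustration of what RemapAtEnd does, the core sequence is: shrink the owning map's
bookkeeping, munmap the tail, then immediately mmap the same range again and wrap it in a new
MemMap. Below is a self-contained sketch for the anonymous-memory case (no ashmem, no
/proc/self/maps diagnostics); the function and parameter names are hypothetical:

  #include <sys/mman.h>
  #include <cstddef>

  // Split off the page-aligned tail of an existing anonymous mapping starting at new_end.
  // Returns the tail address on success, nullptr on failure. As the comment above notes,
  // nothing may allocate between the munmap and mmap calls, or another mapping could land
  // in the hole; the address passed to mmap is only a hint, as in the real code.
  void* RemapTailSketch(void* new_end, size_t tail_size, int tail_prot) {
    if (munmap(new_end, tail_size) == -1) {
      return nullptr;
    }
    void* tail = mmap(new_end, tail_size, tail_prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return tail == MAP_FAILED ? nullptr : tail;
  }

The dlmalloc_space.cc change earlier in this diff uses RemapAtEnd for exactly this purpose:
the tail of the Zygote space's mapping becomes the new allocation space's MemMap instead of
being unmapped and replaced with a fresh anonymous mapping.
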
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 919463c..2c65833 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -84,8 +84,9 @@
     return Begin() <= addr && addr < End();
   }
 
-  // Trim by unmapping pages at the end of the map.
-  void UnMapAtEnd(byte* new_end);
+  // Unmap the pages at the end and remap them to create another memory map.
+  MemMap* RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
+                     std::string* error_msg);
 
  private:
   MemMap(const std::string& name, byte* begin, size_t size, void* base_begin, size_t base_size,
@@ -96,8 +97,10 @@
   size_t size_;  // Length of data.
 
   void* const base_begin_;  // Page-aligned base address.
-  const size_t base_size_;  // Length of mapping.
+  size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (e.g. for the Zygote).
   int prot_;  // Protection of the map.
+
+  friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
 };
 
 }  // namespace art
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index 09de320..cf2c9d0 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -21,7 +21,15 @@
 
 namespace art {
 
-class MemMapTest : public testing::Test {};
+class MemMapTest : public testing::Test {
+ public:
+  byte* BaseBegin(MemMap* mem_map) {
+    return reinterpret_cast<byte*>(mem_map->base_begin_);
+  }
+  size_t BaseSize(MemMap* mem_map) {
+    return mem_map->base_size_;
+  }
+};
 
 TEST_F(MemMapTest, MapAnonymousEmpty) {
   std::string error_msg;
@@ -34,4 +42,57 @@
   ASSERT_TRUE(error_msg.empty());
 }
 
+TEST_F(MemMapTest, RemapAtEnd) {
+  std::string error_msg;
+  // Cast the page size to size_t.
+  const size_t page_size = static_cast<size_t>(kPageSize);
+  // Map a two-page memory region.
+  MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
+                                    NULL,
+                                    2 * page_size,
+                                    PROT_READ | PROT_WRITE,
+                                    &error_msg);
+  // Check its state and write to it.
+  byte* base0 = m0->Begin();
+  ASSERT_TRUE(base0 != NULL) << error_msg;
+  size_t size0 = m0->Size();
+  EXPECT_EQ(m0->Size(), 2 * page_size);
+  EXPECT_EQ(BaseBegin(m0), base0);
+  EXPECT_EQ(BaseSize(m0), size0);
+  memset(base0, 42, 2 * page_size);
+  // Remap the latter half into a second MemMap.
+  MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
+                              "MemMapTest_RemapAtEndTest_map1",
+                              PROT_READ | PROT_WRITE,
+                              &error_msg);
+  // Check the states of the two maps.
+  EXPECT_EQ(m0->Begin(), base0) << error_msg;
+  EXPECT_EQ(m0->Size(), page_size);
+  EXPECT_EQ(BaseBegin(m0), base0);
+  EXPECT_EQ(BaseSize(m0), page_size);
+  byte* base1 = m1->Begin();
+  size_t size1 = m1->Size();
+  EXPECT_EQ(base1, base0 + page_size);
+  EXPECT_EQ(size1, page_size);
+  EXPECT_EQ(BaseBegin(m1), base1);
+  EXPECT_EQ(BaseSize(m1), size1);
+  // Write to the second region.
+  memset(base1, 43, page_size);
+  // Check the contents of the two regions.
+  for (size_t i = 0; i < page_size; ++i) {
+    EXPECT_EQ(base0[i], 42);
+  }
+  for (size_t i = 0; i < page_size; ++i) {
+    EXPECT_EQ(base1[i], 43);
+  }
+  // Unmap the first region.
+  delete m0;
+  // Make sure the second region is still accessible after the first
+  // region is unmapped.
+  for (size_t i = 0; i < page_size; ++i) {
+    EXPECT_EQ(base1[i], 43);
+  }
+  delete m1;
+}
+
 }  // namespace art
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index ccf3e59..c9bf160 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -202,6 +202,13 @@
   DCHECK(!result || IsRuntimeMethod());
   return result;
 }
+
+inline bool ArtMethod::IsImtConflictMethod() const {
+  bool result = this == Runtime::Current()->GetImtConflictMethod();
+  // Check that, if we do think it is phony, it looks like the imt conflict method.
+  DCHECK(!result || IsRuntimeMethod());
+  return result;
+}
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 0520893..f396fbe 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -357,6 +357,8 @@
 
   bool IsResolutionMethod() const;
 
+  bool IsImtConflictMethod() const;
+
   uintptr_t NativePcOffset(const uintptr_t pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Converts a native PC to a dex PC.
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index cd5e865..7f3a302 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -139,6 +139,14 @@
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable, false);
 }
 
+inline ObjectArray<ArtMethod>* Class::GetImTable() const {
+  return GetFieldObject<ObjectArray<ArtMethod>*>(OFFSET_OF_OBJECT_MEMBER(Class, imtable_), false);
+}
+
+inline void Class::SetImTable(ObjectArray<ArtMethod>* new_imtable) {
+  SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, imtable_), new_imtable, false);
+}
+
 inline bool Class::Implements(const Class* klass) const {
   DCHECK(klass != NULL);
   DCHECK(klass->IsInterface()) << PrettyClass(this);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index d15f337..ed1aad3 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -555,6 +555,15 @@
     return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
   }
 
+  ObjectArray<ArtMethod>* GetImTable() const;
+
+  void SetImTable(ObjectArray<ArtMethod>* new_imtable)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static MemberOffset ImTableOffset() {
+    return OFFSET_OF_OBJECT_MEMBER(Class, imtable_);
+  }
+
   // Given a method implemented by this class but potentially from a super class, return the
   // specific implementation method for this class.
   ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method) const
@@ -830,6 +839,9 @@
   // methods for the methods in the interface.
   IfTable* iftable_;
 
+  // Interface method table (imt), for quick "invoke-interface".
+  ObjectArray<ArtMethod>* imtable_;
+
   // descriptor for the class such as "java.lang.Class" or "[C". Lazily initialized by ComputeName
   String* name_;
 
@@ -912,6 +924,7 @@
 
 class MANAGED ClassClass : public Class {
  private:
+  int32_t pad_;
   int64_t serialVersionUID_;
   friend struct art::ClassClassOffsets;  // for verifying offset information
   DISALLOW_IMPLICIT_CONSTRUCTORS(ClassClass);
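
To make the intent of the new imtable_ field concrete, here is a toy model (not ART code) of
conflict-aware IMT dispatch: each class carries a 64-slot table (the size used by
Runtime::CreateDefaultImt later in this change), and a slot shared by several interface
methods holds a conflict sentinel that diverts to a slow path, which in ART is
art_quick_imt_conflict_trampoline. The indexing scheme and names below are illustrative.

  #include <array>
  #include <cstddef>
  #include <cstdint>

  struct ToyMethod {
    bool is_conflict_sentinel;
  };

  constexpr size_t kImtSize = 64;  // matches the table built by Runtime::CreateDefaultImt

  ToyMethod* LookupImt(const std::array<ToyMethod*, kImtSize>& imt,
                       uint32_t method_index,
                       ToyMethod* (*conflict_slow_path)(uint32_t)) {
    ToyMethod* candidate = imt[method_index % kImtSize];
    if (candidate->is_conflict_sentinel) {
      // Several interface methods map to this slot; fall back to a slower search,
      // which is what the imt conflict trampoline does.
      return conflict_slow_path(method_index);
    }
    return candidate;
  }
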
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index e460a8d..7ac2c8c 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -44,7 +44,7 @@
   SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(Object, klass_), new_klass, false, false);
 }
 
-inline LockWord Object::GetLockWord() {
+inline LockWord Object::GetLockWord() const {
   return LockWord(GetField32(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), true));
 }
 
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 92c05b2..49bad4c 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include <ctime>
+
 #include "object.h"
 
 #include "art_field.h"
@@ -82,6 +84,52 @@
   return copy.get();
 }
 
+uint32_t Object::GenerateIdentityHashCode() {
+  static AtomicInteger seed(987654321 + std::time(nullptr));
+  uint32_t expected_value, new_value;
+  do {
+    expected_value = static_cast<uint32_t>(seed.load());
+    new_value = expected_value * 1103515245 + 12345;
+  } while (!seed.compare_and_swap(static_cast<int32_t>(expected_value),
+                                  static_cast<int32_t>(new_value)));
+  return expected_value & LockWord::kHashMask;
+}
+
+int32_t Object::IdentityHashCode() const {
+  while (true) {
+    LockWord lw = GetLockWord();
+    switch (lw.GetState()) {
+      case LockWord::kUnlocked: {
+        // Try to compare and swap in a new hash; if we succeed, we will return the hash on the
+        // next loop iteration.
+        LockWord hash_word(LockWord::FromHashCode(GenerateIdentityHashCode()));
+        DCHECK_EQ(hash_word.GetState(), LockWord::kHashCode);
+        if (const_cast<Object*>(this)->CasLockWord(lw, hash_word)) {
+          return hash_word.GetHashCode();
+        }
+        break;
+      }
+      case LockWord::kThinLocked: {
+        // Inflate the thin lock to a monitor and stick the hash code inside of the monitor.
+        Thread* self = Thread::Current();
+        Monitor::InflateThinLocked(self, const_cast<Object*>(this), lw, GenerateIdentityHashCode());
+        break;
+      }
+      case LockWord::kFatLocked: {
+        // Already inflated, return the hash stored in the monitor.
+        Monitor* monitor = lw.FatLockMonitor();
+        DCHECK(monitor != nullptr);
+        return monitor->GetHashCode();
+      }
+      case LockWord::kHashCode: {
+        return lw.GetHashCode();
+      }
+    }
+  }
+  LOG(FATAL) << "Unreachable";
+  return 0;
+}
+
 void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, const Object* new_value) {
   const Class* c = GetClass();
   if (Runtime::Current()->GetClassLinker() == NULL ||
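
The generator above is a linear congruential generator (the classic 1103515245 / 12345
constants from C's rand) whose seed is advanced with a compare-and-swap, so concurrent
callers each observe a distinct value. Here is a self-contained sketch of the same pattern,
with std::atomic standing in for ART's AtomicInteger and the 30-bit hash mask written out
directly; the function name is illustrative:

  #include <atomic>
  #include <cstdint>
  #include <ctime>

  constexpr uint32_t kHashMask = (1u << 30) - 1;  // non-state bits of the lock word

  uint32_t NextIdentityHash() {
    static std::atomic<uint32_t> seed(987654321u + static_cast<uint32_t>(std::time(nullptr)));
    uint32_t expected = seed.load();
    uint32_t next;
    do {
      next = expected * 1103515245u + 12345u;  // advance the LCG, as in the diff
    } while (!seed.compare_exchange_weak(expected, next));  // 'expected' is refreshed on failure
    return expected & kHashMask;
  }
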
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index e3f5c10..11473cd 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -27,6 +27,7 @@
 
 class ImageWriter;
 class LockWord;
+class Monitor;
 struct ObjectOffsets;
 class Thread;
 
@@ -84,19 +85,13 @@
 
   Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  int32_t IdentityHashCode() const {
-#ifdef MOVING_GARBAGE_COLLECTOR
-    // TODO: we'll need to use the Object's internal concept of identity
-    UNIMPLEMENTED(FATAL);
-#endif
-    return reinterpret_cast<int32_t>(this);
-  }
+  int32_t IdentityHashCode() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static MemberOffset MonitorOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
   }
 
-  LockWord GetLockWord();
+  LockWord GetLockWord() const;
   void SetLockWord(LockWord new_val);
   bool CasLockWord(LockWord old_val, LockWord new_val);
   uint32_t GetLockOwnerThreadId();
@@ -243,7 +238,6 @@
 
  private:
   static void VerifyObject(const Object* obj) ALWAYS_INLINE;
-
   // Verify the type correctness of stores to fields.
   void CheckFieldAssignmentImpl(MemberOffset field_offset, const Object* new_value)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -254,6 +248,9 @@
     }
   }
 
+  // Generate an identity hash code.
+  static uint32_t GenerateIdentityHashCode();
+
   // Write barrier called post update to a reference bearing field.
   static void WriteBarrierField(const Object* dst, MemberOffset offset, const Object* new_value);
 
@@ -262,6 +259,7 @@
   uint32_t monitor_;
 
   friend class art::ImageWriter;
+  friend class art::Monitor;
   friend struct art::ObjectOffsets;  // for verifying offset information
   DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
 };
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 6c6d488..d0d1ee4 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -84,6 +84,7 @@
   EXPECT_EQ(STRING_OFFSET_OFFSET, String::OffsetOffset().Int32Value());
   EXPECT_EQ(STRING_DATA_OFFSET, Array::DataOffset(sizeof(uint16_t)).Int32Value());
 
+  EXPECT_EQ(METHOD_DEX_CACHE_METHODS_OFFSET, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
   EXPECT_EQ(METHOD_CODE_OFFSET, ArtMethod::EntryPointFromCompiledCodeOffset().Int32Value());
 }
 
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 1879f04..01d8f31 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -156,8 +156,8 @@
  private:
   CharArray* ASCII_;
   Object* CASE_INSENSITIVE_ORDER_;
-  int64_t serialVersionUID_;
   uint32_t REPLACEMENT_CHAR_;
+  int64_t serialVersionUID_;
   friend struct art::StringClassOffsets;  // for verifying offset information
   DISALLOW_IMPLICIT_CONSTRUCTORS(StringClass);
 };
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index a5605ff..b1bf84f 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -79,35 +79,49 @@
   is_sensitive_thread_hook_ = is_sensitive_thread_hook;
 }
 
-Monitor::Monitor(Thread* owner, mirror::Object* obj)
+Monitor::Monitor(Thread* owner, mirror::Object* obj, uint32_t hash_code)
     : monitor_lock_("a monitor lock", kMonitorLock),
       monitor_contenders_("monitor contenders", monitor_lock_),
       owner_(owner),
       lock_count_(0),
       obj_(obj),
       wait_set_(NULL),
+      hash_code_(hash_code),
       locking_method_(NULL),
       locking_dex_pc_(0) {
   // We should only inflate a lock if the owner is ourselves or suspended. This avoids a race
   // with the owner unlocking the thin-lock.
-  CHECK(owner == Thread::Current() || owner->IsSuspended());
+  CHECK(owner == nullptr || owner == Thread::Current() || owner->IsSuspended());
+  // The identity hash code is set for the lifetime of the monitor.
 }
 
 bool Monitor::Install(Thread* self) {
   MutexLock mu(self, monitor_lock_);  // Uncontended mutex acquisition as monitor isn't yet public.
-  CHECK(owner_ == self || owner_->IsSuspended());
+  CHECK(owner_ == nullptr || owner_ == self || owner_->IsSuspended());
   // Propagate the lock state.
-  LockWord thin(obj_->GetLockWord());
-  if (thin.GetState() != LockWord::kThinLocked) {
-    // The owner_ is suspended but another thread beat us to install a monitor.
-    CHECK_EQ(thin.GetState(), LockWord::kFatLocked);
-    return false;
+  LockWord lw(obj_->GetLockWord());
+  switch (lw.GetState()) {
+    case LockWord::kThinLocked: {
+      CHECK_EQ(owner_->GetThreadId(), lw.ThinLockOwner());
+      lock_count_ = lw.ThinLockCount();
+      break;
+    }
+    case LockWord::kHashCode: {
+      CHECK_EQ(hash_code_, lw.GetHashCode());
+      break;
+    }
+    case LockWord::kFatLocked: {
+      // The owner_ is suspended but another thread beat us to install a monitor.
+      return false;
+    }
+    case LockWord::kUnlocked: {
+      LOG(FATAL) << "Inflating unlocked lock word";
+      break;
+    }
   }
-  CHECK_EQ(owner_->GetThreadId(), thin.ThinLockOwner());
-  lock_count_ = thin.ThinLockCount();
   LockWord fat(this);
   // Publish the updated lock word, which may race with other threads.
-  bool success = obj_->CasLockWord(thin, fat);
+  bool success = obj_->CasLockWord(lw, fat);
   // Lock profiling.
   if (success && lock_profiling_threshold_ != 0) {
     locking_method_ = owner_->GetCurrentMethod(&locking_dex_pc_);
@@ -540,19 +554,46 @@
  * thread must own the lock or the owner must be suspended. There's a race with other threads
  * inflating the lock and so the caller should read the monitor following the call.
  */
-void Monitor::Inflate(Thread* self, Thread* owner, mirror::Object* obj) {
+void Monitor::Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) {
   DCHECK(self != NULL);
-  DCHECK(owner != NULL);
   DCHECK(obj != NULL);
-
   // Allocate and acquire a new monitor.
-  UniquePtr<Monitor> m(new Monitor(owner, obj));
+  UniquePtr<Monitor> m(new Monitor(owner, obj, hash_code));
   if (m->Install(self)) {
     VLOG(monitor) << "monitor: thread " << owner->GetThreadId()
                     << " created monitor " << m.get() << " for object " << obj;
     Runtime::Current()->GetMonitorList()->Add(m.release());
+    CHECK_EQ(obj->GetLockWord().GetState(), LockWord::kFatLocked);
   }
-  CHECK_EQ(obj->GetLockWord().GetState(), LockWord::kFatLocked);
+}
+
+void Monitor::InflateThinLocked(Thread* self, mirror::Object* obj, LockWord lock_word,
+                                uint32_t hash_code) {
+  DCHECK_EQ(lock_word.GetState(), LockWord::kThinLocked);
+  uint32_t owner_thread_id = lock_word.ThinLockOwner();
+  if (owner_thread_id == self->GetThreadId()) {
+    // We own the monitor, we can easily inflate it.
+    Inflate(self, self, obj, hash_code);
+  } else {
+    ThreadList* thread_list = Runtime::Current()->GetThreadList();
+    // Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
+    ScopedThreadStateChange tsc(self, kBlocked);
+    if (lock_word == obj->GetLockWord()) {  // If lock word hasn't changed.
+      bool timed_out;
+      Thread* owner = thread_list->SuspendThreadByThreadId(lock_word.ThinLockOwner(), false,
+                                                           &timed_out);
+      if (owner != nullptr) {
+        // We succeeded in suspending the thread, check the lock's status didn't change.
+        lock_word = obj->GetLockWord();
+        if (lock_word.GetState() == LockWord::kThinLocked &&
+            lock_word.ThinLockOwner() == owner_thread_id) {
+          // Go ahead and inflate the lock.
+          Inflate(self, owner, obj, hash_code);
+        }
+        thread_list->Resume(owner, false);
+      }
+    }
+  }
 }
 
 void Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
@@ -560,7 +601,6 @@
   DCHECK(obj != NULL);
   uint32_t thread_id = self->GetThreadId();
   size_t contention_count = 0;
-
   while (true) {
     LockWord lock_word = obj->GetLockWord();
     switch (lock_word.GetState()) {
@@ -582,33 +622,17 @@
             return;  // Success!
           } else {
             // We'd overflow the recursion count, so inflate the monitor.
-            Inflate(self, self, obj);
+            InflateThinLocked(self, obj, lock_word, mirror::Object::GenerateIdentityHashCode());
           }
         } else {
           // Contention.
           contention_count++;
-          if (contention_count <= Runtime::Current()->GetMaxSpinsBeforeThinkLockInflation()) {
+          Runtime* runtime = Runtime::Current();
+          if (contention_count <= runtime->GetMaxSpinsBeforeThinkLockInflation()) {
             NanoSleep(1000);  // Sleep for 1us and re-attempt.
           } else {
             contention_count = 0;
-            // Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
-            ScopedThreadStateChange tsc(self, kBlocked);
-            bool timed_out;
-            ThreadList* thread_list = Runtime::Current()->GetThreadList();
-            if (lock_word == obj->GetLockWord()) {  // If lock word hasn't changed.
-              Thread* owner = thread_list->SuspendThreadByThreadId(lock_word.ThinLockOwner(), false,
-                                                                   &timed_out);
-              if (owner != NULL) {
-                // We succeeded in suspending the thread, check the lock's status didn't change.
-                lock_word = obj->GetLockWord();
-                if (lock_word.GetState() == LockWord::kThinLocked &&
-                    lock_word.ThinLockOwner() == owner_thread_id) {
-                  // Go ahead and inflate the lock.
-                  Inflate(self, owner, obj);
-                }
-                thread_list->Resume(owner, false);
-              }
-            }
+            InflateThinLocked(self, obj, lock_word, mirror::Object::GenerateIdentityHashCode());
           }
         }
         continue;  // Start from the beginning.
@@ -618,6 +642,11 @@
         mon->Lock(self);
         return;  // Success!
       }
+      case LockWord::kHashCode: {
+        // Inflate with the existing hashcode.
+        Inflate(self, nullptr, obj, lock_word.GetHashCode());
+        break;
+      }
     }
   }
 }
@@ -628,6 +657,8 @@
 
   LockWord lock_word = obj->GetLockWord();
   switch (lock_word.GetState()) {
+    case LockWord::kHashCode:
+      // Fall-through.
     case LockWord::kUnlocked:
       FailedUnlock(obj, self, NULL, NULL);
       return false;  // Failure.
@@ -672,6 +703,8 @@
 
   LockWord lock_word = obj->GetLockWord();
   switch (lock_word.GetState()) {
+    case LockWord::kHashCode:
+      // Fall-through.
     case LockWord::kUnlocked:
       ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
       return;  // Failure.
@@ -683,7 +716,7 @@
         return;  // Failure.
       } else {
         // We own the lock, inflate to enqueue ourself on the Monitor.
-        Inflate(self, self, obj);
+        Inflate(self, self, obj, mirror::Object::GenerateIdentityHashCode());
         lock_word = obj->GetLockWord();
       }
       break;
@@ -701,6 +734,8 @@
 
   LockWord lock_word = obj->GetLockWord();
   switch (lock_word.GetState()) {
+    case LockWord::kHashCode:
+      // Fall-through.
     case LockWord::kUnlocked:
       ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
       return;  // Failure.
@@ -732,6 +767,8 @@
 
   LockWord lock_word = obj->GetLockWord();
   switch (lock_word.GetState()) {
+    case LockWord::kHashCode:
+      // Fall-through.
     case LockWord::kUnlocked:
       return ThreadList::kInvalidThreadId;
     case LockWord::kThinLocked:
@@ -889,12 +926,19 @@
       }
       return false;  // Fail - unowned monitor in an object.
     }
+    case LockWord::kHashCode:
+      return true;
     default:
       LOG(FATAL) << "Unreachable";
       return false;
   }
 }
 
+bool Monitor::IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  MutexLock mu(Thread::Current(), monitor_lock_);
+  return owner_ != nullptr;
+}
+
 void Monitor::TranslateLocation(const mirror::ArtMethod* method, uint32_t dex_pc,
                                 const char** source_file, uint32_t* line_number) const {
   // If method is null, location is unknown
@@ -976,6 +1020,8 @@
   LockWord lock_word = obj->GetLockWord();
   switch (lock_word.GetState()) {
     case LockWord::kUnlocked:
+      // Fall-through.
+    case LockWord::kHashCode:
       break;
     case LockWord::kThinLocked:
       owner_ = Runtime::Current()->GetThreadList()->FindThreadByThreadId(lock_word.ThinLockOwner());
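
Read together, the MonitorEnter and IdentityHashCode changes form a small state machine over
the lock word; in particular, locking an object that already carries a hash code always
inflates, so the hash can live on in the Monitor's hash_code_ field. The enum and function
below are an illustrative summary of one MonitorEnter step, not ART code:

  enum class ToyState { kUnlocked, kThinLocked, kFatLocked, kHashCode };

  ToyState AfterMonitorEnterStep(ToyState state) {
    switch (state) {
      case ToyState::kUnlocked:
        return ToyState::kThinLocked;  // CAS in a thin lock owned by the caller.
      case ToyState::kThinLocked:
        return ToyState::kThinLocked;  // Recurse or spin; inflates on overflow or contention.
      case ToyState::kHashCode:
        return ToyState::kFatLocked;   // Inflate(self, nullptr, obj, existing hash code).
      case ToyState::kFatLocked:
        return ToyState::kFatLocked;   // mon->Lock(self).
    }
    return state;
  }
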
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 27124a2..c464400 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -88,8 +88,7 @@
 
   static bool IsValidLockWord(LockWord lock_word);
 
-  // TODO: SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-  mirror::Object* GetObject() const {
+  mirror::Object* GetObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return obj_;
   }
 
@@ -99,8 +98,17 @@
     return owner_;
   }
 
+  int32_t GetHashCode() const {
+    return hash_code_;
+  }
+
+  bool IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static void InflateThinLocked(Thread* self, mirror::Object* obj, LockWord lock_word,
+                                uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS;
+
  private:
-  explicit Monitor(Thread* owner, mirror::Object* obj)
+  explicit Monitor(Thread* owner, mirror::Object* obj, uint32_t hash_code)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Install the monitor into its object, may fail if another thread installs a different monitor
@@ -112,7 +120,7 @@
   void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
   void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
 
-  static void Inflate(Thread* self, Thread* owner, mirror::Object* obj)
+  static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
@@ -171,6 +179,9 @@
   // Threads currently waiting on this monitor.
   Thread* wait_set_ GUARDED_BY(monitor_lock_);
 
+  // Stored object hash code, always generated.
+  const uint32_t hash_code_;
+
   // Method and dex pc where the lock owner acquired the lock, used when lock
   // sampling is enabled. locking_method_ may be null if the lock is currently
   // unlocked, or if the lock is acquired by the system when the stack is empty.
@@ -190,7 +201,7 @@
 
   void Add(Monitor* m);
 
-  void SweepMonitorList(RootVisitor visitor, void* arg);
+  void SweepMonitorList(RootVisitor visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void DisallowNewMonitors();
   void AllowNewMonitors();
 
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 6674db2..ea78e04 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -339,6 +339,9 @@
 }
 
 static jint System_identityHashCode(JNIEnv* env, jclass, jobject javaObject) {
+  if (javaObject == nullptr) {
+    return 0;
+  }
   ScopedFastNativeObjectAccess soa(env);
   mirror::Object* o = soa.Decode<mirror::Object*>(javaObject);
   return static_cast<jint>(o->IdentityHashCode());
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 6fe5d10..defda6b 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
 namespace art {
 
 const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '0', '8', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '0', '9', '\0' };
 
 OatHeader::OatHeader() {
   memset(this, 0, sizeof(*this));
@@ -60,8 +60,10 @@
   interpreter_to_interpreter_bridge_offset_ = 0;
   interpreter_to_compiled_code_bridge_offset_ = 0;
   jni_dlsym_lookup_offset_ = 0;
+  portable_imt_conflict_trampoline_offset_ = 0;
   portable_resolution_trampoline_offset_ = 0;
   portable_to_interpreter_bridge_offset_ = 0;
+  quick_imt_conflict_trampoline_offset_ = 0;
   quick_resolution_trampoline_offset_ = 0;
   quick_to_interpreter_bridge_offset_ = 0;
 }
@@ -171,18 +173,37 @@
   UpdateChecksum(&jni_dlsym_lookup_offset_, sizeof(offset));
 }
 
+const void* OatHeader::GetPortableImtConflictTrampoline() const {
+  return reinterpret_cast<const uint8_t*>(this) + GetPortableImtConflictTrampolineOffset();
+}
+
+uint32_t OatHeader::GetPortableImtConflictTrampolineOffset() const {
+  DCHECK(IsValid());
+  CHECK_GE(portable_imt_conflict_trampoline_offset_, jni_dlsym_lookup_offset_);
+  return portable_imt_conflict_trampoline_offset_;
+}
+
+void OatHeader::SetPortableImtConflictTrampolineOffset(uint32_t offset) {
+  CHECK(offset == 0 || offset >= jni_dlsym_lookup_offset_);
+  DCHECK(IsValid());
+  DCHECK_EQ(portable_imt_conflict_trampoline_offset_, 0U) << offset;
+
+  portable_imt_conflict_trampoline_offset_ = offset;
+  UpdateChecksum(&portable_imt_conflict_trampoline_offset_, sizeof(offset));
+}
+
 const void* OatHeader::GetPortableResolutionTrampoline() const {
   return reinterpret_cast<const uint8_t*>(this) + GetPortableResolutionTrampolineOffset();
 }
 
 uint32_t OatHeader::GetPortableResolutionTrampolineOffset() const {
   DCHECK(IsValid());
-  CHECK_GE(portable_resolution_trampoline_offset_, jni_dlsym_lookup_offset_);
+  CHECK_GE(portable_resolution_trampoline_offset_, portable_imt_conflict_trampoline_offset_);
   return portable_resolution_trampoline_offset_;
 }
 
 void OatHeader::SetPortableResolutionTrampolineOffset(uint32_t offset) {
-  CHECK(offset == 0 || offset >= jni_dlsym_lookup_offset_);
+  CHECK(offset == 0 || offset >= portable_imt_conflict_trampoline_offset_);
   DCHECK(IsValid());
   DCHECK_EQ(portable_resolution_trampoline_offset_, 0U) << offset;
 
@@ -209,18 +230,37 @@
   UpdateChecksum(&portable_to_interpreter_bridge_offset_, sizeof(offset));
 }
 
+const void* OatHeader::GetQuickImtConflictTrampoline() const {
+  return reinterpret_cast<const uint8_t*>(this) + GetQuickImtConflictTrampolineOffset();
+}
+
+uint32_t OatHeader::GetQuickImtConflictTrampolineOffset() const {
+  DCHECK(IsValid());
+  CHECK_GE(quick_imt_conflict_trampoline_offset_, portable_to_interpreter_bridge_offset_);
+  return quick_imt_conflict_trampoline_offset_;
+}
+
+void OatHeader::SetQuickImtConflictTrampolineOffset(uint32_t offset) {
+  CHECK(offset == 0 || offset >= portable_to_interpreter_bridge_offset_);
+  DCHECK(IsValid());
+  DCHECK_EQ(quick_imt_conflict_trampoline_offset_, 0U) << offset;
+
+  quick_imt_conflict_trampoline_offset_ = offset;
+  UpdateChecksum(&quick_imt_conflict_trampoline_offset_, sizeof(offset));
+}
+
 const void* OatHeader::GetQuickResolutionTrampoline() const {
   return reinterpret_cast<const uint8_t*>(this) + GetQuickResolutionTrampolineOffset();
 }
 
 uint32_t OatHeader::GetQuickResolutionTrampolineOffset() const {
   DCHECK(IsValid());
-  CHECK_GE(quick_resolution_trampoline_offset_, portable_to_interpreter_bridge_offset_);
+  CHECK_GE(quick_resolution_trampoline_offset_, quick_imt_conflict_trampoline_offset_);
   return quick_resolution_trampoline_offset_;
 }
 
 void OatHeader::SetQuickResolutionTrampolineOffset(uint32_t offset) {
-  CHECK(offset == 0 || offset >= portable_to_interpreter_bridge_offset_);
+  CHECK(offset == 0 || offset >= quick_imt_conflict_trampoline_offset_);
   DCHECK(IsValid());
   DCHECK_EQ(quick_resolution_trampoline_offset_, 0U) << offset;
 
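
The CHECK and CHECK_GE calls above encode, pairwise, that the trampoline offsets in the oat
header appear in a fixed order (jni dlsym lookup, portable imt conflict, portable resolution,
portable-to-interpreter bridge, quick imt conflict, quick resolution, quick-to-interpreter
bridge), with 0 meaning the trampoline is absent. Roughly the same invariant can be written
as one pass over the sequence; this helper is illustrative, not part of the oat format:

  #include <cstddef>
  #include <cstdint>

  // True if the non-zero offsets form a non-decreasing sequence in declaration order.
  bool TrampolineOffsetsOrdered(const uint32_t* offsets, size_t count) {
    uint32_t previous = 0;
    for (size_t i = 0; i < count; ++i) {
      if (offsets[i] != 0) {
        if (offsets[i] < previous) {
          return false;
        }
        previous = offsets[i];
      }
    }
    return true;
  }
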
diff --git a/runtime/oat.h b/runtime/oat.h
index a9dc540..c864c2c 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -62,6 +62,9 @@
   const void* GetPortableResolutionTrampoline() const;
   uint32_t GetPortableResolutionTrampolineOffset() const;
   void SetPortableResolutionTrampolineOffset(uint32_t offset);
+  const void* GetPortableImtConflictTrampoline() const;
+  uint32_t GetPortableImtConflictTrampolineOffset() const;
+  void SetPortableImtConflictTrampolineOffset(uint32_t offset);
   const void* GetPortableToInterpreterBridge() const;
   uint32_t GetPortableToInterpreterBridgeOffset() const;
   void SetPortableToInterpreterBridgeOffset(uint32_t offset);
@@ -69,6 +72,9 @@
   const void* GetQuickResolutionTrampoline() const;
   uint32_t GetQuickResolutionTrampolineOffset() const;
   void SetQuickResolutionTrampolineOffset(uint32_t offset);
+  const void* GetQuickImtConflictTrampoline() const;
+  uint32_t GetQuickImtConflictTrampolineOffset() const;
+  void SetQuickImtConflictTrampolineOffset(uint32_t offset);
   const void* GetQuickToInterpreterBridge() const;
   uint32_t GetQuickToInterpreterBridgeOffset() const;
   void SetQuickToInterpreterBridgeOffset(uint32_t offset);
@@ -91,8 +97,10 @@
   uint32_t interpreter_to_interpreter_bridge_offset_;
   uint32_t interpreter_to_compiled_code_bridge_offset_;
   uint32_t jni_dlsym_lookup_offset_;
+  uint32_t portable_imt_conflict_trampoline_offset_;
   uint32_t portable_resolution_trampoline_offset_;
   uint32_t portable_to_interpreter_bridge_offset_;
+  uint32_t quick_imt_conflict_trampoline_offset_;
   uint32_t quick_resolution_trampoline_offset_;
   uint32_t quick_to_interpreter_bridge_offset_;
 
diff --git a/runtime/object_utils.h b/runtime/object_utils.h
index 3ca3c0b..bf25b81 100644
--- a/runtime/object_utils.h
+++ b/runtime/object_utils.h
@@ -458,6 +458,8 @@
       Runtime* runtime = Runtime::Current();
       if (method_ == runtime->GetResolutionMethod()) {
         return "<runtime internal resolution method>";
+      } else if (method_ == runtime->GetImtConflictMethod()) {
+        return "<runtime internal imt conflict method>";
       } else if (method_ == runtime->GetCalleeSaveMethod(Runtime::kSaveAll)) {
         return "<runtime internal callee-save all registers method>";
       } else if (method_ == runtime->GetCalleeSaveMethod(Runtime::kRefsOnly)) {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index f46b794..34cf45b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -84,6 +84,8 @@
       java_vm_(NULL),
       pre_allocated_OutOfMemoryError_(NULL),
       resolution_method_(NULL),
+      imt_conflict_method_(NULL),
+      default_imt_(NULL),
       threads_being_born_(0),
       shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)),
       shutting_down_(false),
@@ -1175,6 +1177,10 @@
   }
   resolution_method_ = reinterpret_cast<mirror::ArtMethod*>(visitor(resolution_method_, arg));
   DCHECK(resolution_method_ != nullptr);
+  imt_conflict_method_ = reinterpret_cast<mirror::ArtMethod*>(visitor(imt_conflict_method_, arg));
+  DCHECK(imt_conflict_method_ != nullptr);
+  default_imt_ = reinterpret_cast<mirror::ObjectArray<mirror::ArtMethod>*>(visitor(default_imt_, arg));
+  DCHECK(default_imt_ != nullptr);
   for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
     callee_save_methods_[i] = reinterpret_cast<mirror::ArtMethod*>(
         visitor(callee_save_methods_[i], arg));
@@ -1192,6 +1198,31 @@
   VisitNonConcurrentRoots(visitor, arg);
 }
 
+mirror::ObjectArray<mirror::ArtMethod>* Runtime::CreateDefaultImt(ClassLinker* cl) {
+  Thread* self = Thread::Current();
+  SirtRef<mirror::ObjectArray<mirror::ArtMethod> > imtable(self, cl->AllocArtMethodArray(self, 64));
+  mirror::ArtMethod* imt_conflict_method = Runtime::Current()->GetImtConflictMethod();
+  for (size_t i = 0; i < 64; i++) {
+    imtable->Set(i, imt_conflict_method);
+  }
+  return imtable.get();
+}
+
+mirror::ArtMethod* Runtime::CreateImtConflictMethod() {
+  mirror::Class* method_class = mirror::ArtMethod::GetJavaLangReflectArtMethod();
+  Thread* self = Thread::Current();
+  SirtRef<mirror::ArtMethod>
+      method(self, down_cast<mirror::ArtMethod*>(method_class->AllocObject(self)));
+  method->SetDeclaringClass(method_class);
+  // TODO: use a special method for imt conflict method saves
+  method->SetDexMethodIndex(DexFile::kDexNoIndex);
+  // When compiling, the code pointer will get set later when the image is loaded.
+  Runtime* r = Runtime::Current();
+  ClassLinker* cl = r->GetClassLinker();
+  method->SetEntryPointFromCompiledCode(r->IsCompiler() ? NULL : GetImtConflictTrampoline(cl));
+  return method.get();
+}
+
 mirror::ArtMethod* Runtime::CreateResolutionMethod() {
   mirror::Class* method_class = mirror::ArtMethod::GetJavaLangReflectArtMethod();
   Thread* self = Thread::Current();
diff --git a/runtime/runtime.h b/runtime/runtime.h
index b6429b6..0ce2642 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -45,6 +45,7 @@
 namespace mirror {
   class ArtMethod;
   class ClassLoader;
+  template<class T> class ObjectArray;
   template<class T> class PrimitiveArray;
   typedef PrimitiveArray<int8_t> ByteArray;
   class String;
@@ -331,7 +332,8 @@
 
   // Sweep system weaks; a system weak is deleted if the visitor returns nullptr. Otherwise, the
   // system weak is updated to be the visitor's returned value.
-  void SweepSystemWeaks(RootVisitor* visitor, void* arg);
+  void SweepSystemWeaks(RootVisitor* visitor, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns a special method that calls into a trampoline for runtime method resolution
   mirror::ArtMethod* GetResolutionMethod() const {
@@ -349,6 +351,39 @@
 
   mirror::ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Returns a special method that calls into a trampoline for runtime imt conflicts
+  mirror::ArtMethod* GetImtConflictMethod() const {
+    CHECK(HasImtConflictMethod());
+    return imt_conflict_method_;
+  }
+
+  bool HasImtConflictMethod() const {
+    return imt_conflict_method_ != NULL;
+  }
+
+  void SetImtConflictMethod(mirror::ArtMethod* method) {
+    imt_conflict_method_ = method;
+  }
+
+  mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Returns an imt with every entry set to conflict, used as default imt for all classes.
+  mirror::ObjectArray<mirror::ArtMethod>* GetDefaultImt() const {
+    CHECK(HasDefaultImt());
+    return default_imt_;
+  }
+
+  bool HasDefaultImt() const {
+    return default_imt_ != NULL;
+  }
+
+  void SetDefaultImt(mirror::ObjectArray<mirror::ArtMethod>* imt) {
+    default_imt_ = imt;
+  }
+
+  mirror::ObjectArray<mirror::ArtMethod>* CreateDefaultImt(ClassLinker* cl)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Returns a special method that describes all callee saves being spilled to the stack.
   enum CalleeSaveType {
     kSaveAll,
@@ -485,6 +520,10 @@
 
   mirror::ArtMethod* resolution_method_;
 
+  mirror::ArtMethod* imt_conflict_method_;
+
+  mirror::ObjectArray<mirror::ArtMethod>* default_imt_;
+
   // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
   // the shutdown lock so that threads aren't born while we're shutting down.
   size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3063658..9751076 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1555,6 +1555,7 @@
   INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge),
   INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge),
   JNI_ENTRY_POINT_INFO(pDlsymLookup),
+  PORTABLE_ENTRY_POINT_INFO(pPortableImtConflictTrampoline),
   PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampoline),
   PORTABLE_ENTRY_POINT_INFO(pPortableToInterpreterBridge),
   QUICK_ENTRY_POINT_INFO(pAllocArray),
@@ -1617,10 +1618,10 @@
   QUICK_ENTRY_POINT_INFO(pMemcmp16),
   QUICK_ENTRY_POINT_INFO(pStringCompareTo),
   QUICK_ENTRY_POINT_INFO(pMemcpy),
+  QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline),
   QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline),
   QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge),
   QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
-  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
   QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
   QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
   QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
diff --git a/test/055-enum-performance/run b/test/055-enum-performance/run
index 1436ce2..e27a622 100755
--- a/test/055-enum-performance/run
+++ b/test/055-enum-performance/run
@@ -14,5 +14,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# As this is a performance test we always run -O
-exec ${RUN} -O "$@"
+# As this is a performance test we always use the non-debug build.
+exec ${RUN} "${@/#libartd.so/libart.so}"
diff --git a/test/100-reflect2/expected.txt b/test/100-reflect2/expected.txt
index 967f167..3d87ebc 100644
--- a/test/100-reflect2/expected.txt
+++ b/test/100-reflect2/expected.txt
@@ -35,7 +35,7 @@
 62 (class java.lang.Long)
 14 (class java.lang.Short)
 [public java.lang.String(), java.lang.String(int,int,char[]), public java.lang.String(java.lang.String), public java.lang.String(java.lang.StringBuffer), public java.lang.String(java.lang.StringBuilder), public java.lang.String(byte[]), public java.lang.String(byte[],int), public java.lang.String(byte[],int,int), public java.lang.String(byte[],int,int,int), public java.lang.String(byte[],int,int,java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],int,int,java.nio.charset.Charset), public java.lang.String(byte[],java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],java.nio.charset.Charset), public java.lang.String(char[]), public java.lang.String(char[],int,int), public java.lang.String(int[],int,int)]
-[private final char[] java.lang.String.value, private final int java.lang.String.count, private int java.lang.String.hashCode, private final int java.lang.String.offset, private static final char[] java.lang.String.ASCII, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER, private static final long java.lang.String.serialVersionUID, private static final char java.lang.String.REPLACEMENT_CHAR]
+[private final char[] java.lang.String.value, private final int java.lang.String.count, private int java.lang.String.hashCode, private final int java.lang.String.offset, private static final char[] java.lang.String.ASCII, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER, private static final char java.lang.String.REPLACEMENT_CHAR, private static final long java.lang.String.serialVersionUID]
 [void java.lang.String._getChars(int,int,char[],int), public char java.lang.String.charAt(int), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public volatile int java.lang.String.compareTo(java.lang.Object), public native int java.lang.String.compareTo(java.lang.String), public int java.lang.String.compareToIgnoreCase(java.lang.String), public java.lang.String java.lang.String.concat(java.lang.String), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public void java.lang.String.getBytes(int,int,byte[],int), public [B java.lang.String.getBytes(), public [B java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public [B java.lang.String.getBytes(java.nio.charset.Charset), public void java.lang.String.getChars(int,int,char[],int), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public native java.lang.String java.lang.String.intern(), public boolean java.lang.String.isEmpty(), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public boolean java.lang.String.matches(java.lang.String), public int java.lang.String.offsetByCodePoints(int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public [C java.lang.String.toCharArray(), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.failedBoundsCheck(int,int,int), private native 
int java.lang.String.fastIndexOf(int,int), private char java.lang.String.foldCase(char), public static transient java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static transient java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), private java.lang.StringIndexOutOfBoundsException java.lang.String.indexAndLength(int), private static int java.lang.String.indexOf(java.lang.String,java.lang.String,int,int,char), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.startEndAndLength(int,int), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(long), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int)]
 []
 [interface java.io.Serializable, interface java.lang.Comparable, interface java.lang.CharSequence]
diff --git a/test/Android.mk b/test/Android.mk
index cdd61f0..6d3a84a 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -44,6 +44,7 @@
 	Main \
 	HelloWorld \
 	\
+	InterfaceTest \
 	JniTest \
 	NativeAllocations \
 	ParallelGC \
diff --git a/test/InterfaceTest/InterfaceTest.java b/test/InterfaceTest/InterfaceTest.java
new file mode 100644
index 0000000..ed18eb3d
--- /dev/null
+++ b/test/InterfaceTest/InterfaceTest.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Map;
+import java.util.HashMap;
+
+class InterfaceTest {
+
+  public static long test_virtual(HashMap map) {
+    Integer intobj = new Integer(0);
+    String s = "asdf";
+    long start = System.currentTimeMillis();
+    for (int i = 0; i < 1000000; i++) {
+        map.put(intobj, s);
+    }
+    long end = System.currentTimeMillis();
+    return (end - start);
+  }
+
+  public static long test_interface(Map map) {
+    Integer intobj = new Integer(0);
+    String s = "asdf";
+    long start = System.currentTimeMillis();
+    for (int i = 0; i < 1000000; i++) {
+        map.put(intobj, s);
+    }
+    long end = System.currentTimeMillis();
+    return (end - start);
+  }
+
+  public static void main(String[] args) {
+    HashMap hashmap = new HashMap();
+    long elapsed = test_virtual(hashmap);
+    System.logI("virtual map put: " + elapsed);
+    hashmap.clear();
+
+    elapsed = test_interface(hashmap);
+    System.logI("interface map put: " + elapsed);
+  }
+}
diff --git a/test/etc/host-run-test-jar b/test/etc/host-run-test-jar
index 357fb5a..da74532 100755
--- a/test/etc/host-run-test-jar
+++ b/test/etc/host-run-test-jar
@@ -9,7 +9,6 @@
     fi
 }
 
-LIB="libartd.so"
 DEBUGGER="n"
 GDB="n"
 INTERPRETER="n"
@@ -23,11 +22,17 @@
     if [ "x$1" = "x--quiet" ]; then
         QUIET="y"
         shift
-    elif [ "x$1" = "x-lib" ]; then
+    elif [ "x$1" = "x--lib" ]; then
         shift
+        if [ "x$1" = "x" ]; then
+            echo "$0 missing argument to --lib" 1>&2
+            exit 1
+        fi
         LIB="$1"
-    elif [ "x$1" = "x-O" ]; then
-        LIB="libart.so"
+        shift
+    elif [ "x$1" = "x--boot" ]; then
+        shift
+        BOOT_OPT="$1"
         shift
     elif [ "x$1" = "x--debug" ]; then
         DEBUGGER="y"
@@ -38,6 +43,10 @@
         shift
     elif [ "x$1" = "x--invoke-with" ]; then
         shift
+        if [ "x$1" = "x" ]; then
+            echo "$0 missing argument to --invoke-with" 1>&2
+            exit 1
+        fi
         if [ "x$INVOKE_WITH" = "x" ]; then
             INVOKE_WITH="$1"
         else
@@ -106,7 +115,9 @@
 
 JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
 
+if [ "$DEV_MODE" = "y" ]; then
+  echo $cmdline "$@"
+fi
+
 cd $ANDROID_BUILD_TOP
-$INVOKE_WITH $gdb $exe $gdbargs -XXlib:$LIB -Ximage:$ANDROID_ROOT/framework/core.art \
-    $JNI_OPTS $INT_OPTS $DEBUGGER_OPTS \
-    -cp $DEX_LOCATION/$TEST_NAME.jar Main "$@"
+$INVOKE_WITH $gdb $exe $gdbargs -XXlib:$LIB $JNI_OPTS $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar Main "$@"
diff --git a/test/etc/push-and-run-test-jar b/test/etc/push-and-run-test-jar
index cc28592..ff75d32 100755
--- a/test/etc/push-and-run-test-jar
+++ b/test/etc/push-and-run-test-jar
@@ -9,7 +9,6 @@
     fi
 }
 
-LIB="libartd.so"
 GDB="n"
 DEBUGGER="n"
 INTERPRETER="n"
@@ -24,11 +23,17 @@
     if [ "x$1" = "x--quiet" ]; then
         QUIET="y"
         shift
-    elif [ "x$1" = "x-lib" ]; then
+    elif [ "x$1" = "x--lib" ]; then
         shift
+        if [ "x$1" = "x" ]; then
+            echo "$0 missing argument to --lib" 1>&2
+            exit 1
+        fi
         LIB="$1"
-    elif [ "x$1" = "x-O" ]; then
-        LIB="libart.so"
+        shift
+    elif [ "x$1" = "x--boot" ]; then
+        shift
+        BOOT_OPT="$1"
         shift
     elif [ "x$1" = "x--debug" ]; then
         DEBUGGER="y"
@@ -49,6 +54,10 @@
         shift
     elif [ "x$1" = "x--invoke-with" ]; then
         shift
+        if [ "x$1" = "x" ]; then
+            echo "$0 missing argument to --invoke-with" 1>&2
+            exit 1
+        fi
         if [ "x$INVOKE_WITH" = "x" ]; then
             INVOKE_WITH="$1"
         else
@@ -132,7 +141,7 @@
 JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
 
 cmdline="cd $DEX_LOCATION && mkdir dalvik-cache && export ANDROID_DATA=$DEX_LOCATION && export DEX_LOCATION=$DEX_LOCATION && \
-    $INVOKE_WITH $gdb dalvikvm $gdbargs -XXlib:$LIB $ZYGOTE $JNI_OPTS $INT_OPTS $DEBUGGER_OPTS -Ximage:/data/art-test/core.art -cp $DEX_LOCATION/$TEST_NAME.jar Main"
+    $INVOKE_WITH $gdb dalvikvm $gdbargs -XXlib:$LIB $ZYGOTE $JNI_OPTS $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar Main"
 if [ "$DEV_MODE" = "y" ]; then
   echo $cmdline "$@"
 fi
diff --git a/test/run-test b/test/run-test
index c449e84..f706110 100755
--- a/test/run-test
+++ b/test/run-test
@@ -58,12 +58,14 @@
 expected="expected.txt"
 output="output.txt"
 build_output="build-output.txt"
+lib="libartd.so"
 run_args="--quiet"
 
 target_mode="yes"
 dev_mode="no"
 update_mode="no"
 debug_mode="no"
+dalvik_mode="no"
 usage="no"
 build_only="no"
 
@@ -79,7 +81,16 @@
         NEED_DEX="false"
         shift
     elif [ "x$1" = "x-O" ]; then
-        run_args="${run_args} -O"
+        lib="libart.so"
+        shift
+    elif [ "x$1" = "x--dalvik" ]; then
+        lib="libdvm.so"
+        dalvik_mode="yes"
+        shift
+    elif [ "x$1" = "x--image" ]; then
+        shift
+        image="$1"
+        run_args="${run_args} --image $image"
         shift
     elif [ "x$1" = "x--debug" ]; then
         run_args="${run_args} --debug"
@@ -106,6 +117,11 @@
     elif [ "x$1" = "x--invoke-with" ]; then
         shift
         what="$1"
+        if [ "x$what" = "x" ]; then
+            echo "$0 missing argument to --invoke-with" 1>&2
+            usage="yes"
+            break
+        fi
         run_args="${run_args} --invoke-with ${what}"
         shift
     elif [ "x$1" = "x--dev" ]; then
@@ -118,6 +134,11 @@
     elif [ "x$1" = "x--output-path" ]; then
         shift
         tmp_dir=$1
+        if [ "x$tmp_dir" = "x" ]; then
+            echo "$0 missing argument to --output-path" 1>&2
+            usage="yes"
+            break
+        fi
         shift
     elif [ "x$1" = "x--update" ]; then
         update_mode="yes"
@@ -134,6 +155,24 @@
     fi
 done
 
+run_args="${run_args} --lib $lib"
+
+if [ "$dalvik_mode" = "no" ]; then
+    if [ "$target_mode" = "no" ]; then
+        run_args="${run_args} --boot -Ximage:${ANDROID_HOST_OUT}/framework/core.art"
+    else
+        run_args="${run_args} --boot -Ximage:/data/art-test/core.art"
+    fi
+else
+    if [ "$target_mode" = "no" ]; then
+        framework="${OUT}/system/framework"
+        bpath="${framework}/core.jar:${framework}/conscrypt.jar:${framework}/okhttp.jar:${framework}/core-junit.jar:${framework}/bouncycastle.jar:${framework}/ext.jar"
+        run_args="${run_args} --boot -Xbootclasspath:${bpath}"
+    else
+        true # defaults to using target BOOTCLASSPATH
+    fi
+fi
+
 if [ "$dev_mode" = "yes" -a "$update_mode" = "yes" ]; then
     echo "--dev and --update are mutually exclusive" 1>&2
     usage="yes"
@@ -185,6 +224,7 @@
         echo "                   other runtime options are ignored."
         echo "    --host         Use the host-mode virtual machine."
         echo "    --invoke-with  Pass --invoke-with option to runtime."
+        echo "    --dalvik       Use Dalvik (off by default)."
         echo "    --jvm          Use a host-local RI virtual machine."
         echo "    --output-path [path] Location where to store the build" \
              "files."
@@ -237,7 +277,7 @@
     if [ "$build_exit" = '0' ]; then
         echo "${test_dir}: running..." 1>&2
         "./${run}" $run_args "$@" 2>&1
-	run_exit="$?"
+        run_exit="$?"
         echo "run exit status: $run_exit" 1>&2
         if [ "$run_exit" = "0" ]; then
             good="yes"