Merge "Fixed debugger for threads attached from JNI" into lmp-mr1-dev
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index c6fc115..3ea4e5e 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -449,6 +449,8 @@
       ObjectArray<Object>::Alloc(self, object_array_class.Get(), ImageHeader::kImageRootsMax)));
   image_roots->Set<false>(ImageHeader::kResolutionMethod, runtime->GetResolutionMethod());
   image_roots->Set<false>(ImageHeader::kImtConflictMethod, runtime->GetImtConflictMethod());
+  image_roots->Set<false>(ImageHeader::kImtUnimplementedMethod,
+                          runtime->GetImtUnimplementedMethod());
   image_roots->Set<false>(ImageHeader::kDefaultImt, runtime->GetDefaultImt());
   image_roots->Set<false>(ImageHeader::kCalleeSaveMethod,
                           runtime->GetCalleeSaveMethod(Runtime::kSaveAll));
@@ -685,7 +687,7 @@
 
 const byte* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted) {
   DCHECK(!method->IsResolutionMethod() && !method->IsImtConflictMethod() &&
-         !method->IsAbstract()) << PrettyMethod(method);
+         !method->IsImtUnimplementedMethod() && !method->IsAbstract()) << PrettyMethod(method);
 
   // Use original code if it exists. Otherwise, set the code pointer to the resolution
   // trampoline.
@@ -716,9 +718,11 @@
 const byte* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) {
   // Calculate the quick entry point following the same logic as FixupMethod() below.
   // The resolution method has a special trampoline to call.
-  if (UNLIKELY(method == Runtime::Current()->GetResolutionMethod())) {
+  Runtime* runtime = Runtime::Current();
+  if (UNLIKELY(method == runtime->GetResolutionMethod())) {
     return GetOatAddress(quick_resolution_trampoline_offset_);
-  } else if (UNLIKELY(method == Runtime::Current()->GetImtConflictMethod())) {
+  } else if (UNLIKELY(method == runtime->GetImtConflictMethod() ||
+                      method == runtime->GetImtUnimplementedMethod())) {
     return GetOatAddress(quick_imt_conflict_trampoline_offset_);
   } else {
     // We assume all methods have code. If they don't currently then we set them to the use the
@@ -738,12 +742,14 @@
   // oat_begin_
 
   // The resolution method has a special trampoline to call.
-  if (UNLIKELY(orig == Runtime::Current()->GetResolutionMethod())) {
+  Runtime* runtime = Runtime::Current();
+  if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
 #if defined(ART_USE_PORTABLE_COMPILER)
     copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_resolution_trampoline_offset_));
 #endif
     copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_resolution_trampoline_offset_));
-  } else if (UNLIKELY(orig == Runtime::Current()->GetImtConflictMethod())) {
+  } else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
+                      orig == runtime->GetImtUnimplementedMethod())) {
 #if defined(ART_USE_PORTABLE_COMPILER)
     copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_imt_conflict_trampoline_offset_));
 #endif
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 83c7871..9cf7494 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -108,6 +108,7 @@
 const char* image_roots_descriptions_[] = {
   "kResolutionMethod",
   "kImtConflictMethod",
+  "kImtUnimplementedMethod",
   "kDefaultImt",
   "kCalleeSaveMethod",
   "kRefsOnlySaveMethod",
@@ -1315,7 +1316,7 @@
         }
       } else if (method->IsAbstract() || method->IsCalleeSaveMethod() ||
           method->IsResolutionMethod() || method->IsImtConflictMethod() ||
-          method->IsClassInitializer()) {
+          method->IsImtUnimplementedMethod() || method->IsClassInitializer()) {
         DCHECK(method->GetNativeGcMap() == nullptr) << PrettyMethod(method);
         DCHECK(method->GetMappingTable() == nullptr) << PrettyMethod(method);
       } else {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d718367..ef8562e 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -372,6 +372,7 @@
   Runtime* runtime = Runtime::Current();
   runtime->SetResolutionMethod(runtime->CreateResolutionMethod());
   runtime->SetImtConflictMethod(runtime->CreateImtConflictMethod());
+  runtime->SetImtUnimplementedMethod(runtime->CreateImtConflictMethod());
   runtime->SetDefaultImt(runtime->CreateDefaultImt(this));
 
   // Set up GenericJNI entrypoint. That is mainly a hack for common_compiler_test.h so that
@@ -3142,7 +3143,11 @@
   new_class->SetPrimitiveType(Primitive::kPrimNot);
   new_class->SetClassLoader(component_type->GetClassLoader());
   new_class->SetStatus(mirror::Class::kStatusLoaded, self);
-  new_class->PopulateEmbeddedImtAndVTable();
+  {
+    StackHandleScope<mirror::Class::kImtSize> hs(self,
+                                                 Runtime::Current()->GetImtUnimplementedMethod());
+    new_class->PopulateEmbeddedImtAndVTable(&hs);
+  }
   new_class->SetStatus(mirror::Class::kStatusInitialized, self);
   // don't need to set new_class->SetObjectSize(..)
   // because Object::SizeOf delegates to Array::SizeOf
@@ -4371,7 +4376,9 @@
   if (!LinkSuperClass(klass)) {
     return false;
   }
-  if (!LinkMethods(self, klass, interfaces)) {
+  StackHandleScope<mirror::Class::kImtSize> imt_handle_scope(
+      self, Runtime::Current()->GetImtUnimplementedMethod());
+  if (!LinkMethods(self, klass, interfaces, &imt_handle_scope)) {
     return false;
   }
   if (!LinkInstanceFields(klass)) {
@@ -4391,7 +4398,7 @@
     CHECK_EQ(klass->GetClassSize(), class_size) << PrettyDescriptor(klass.Get());
 
     if (klass->ShouldHaveEmbeddedImtAndVTable()) {
-      klass->PopulateEmbeddedImtAndVTable();
+      klass->PopulateEmbeddedImtAndVTable(&imt_handle_scope);
     }
 
     // This will notify waiters on klass that saw the not yet resolved
@@ -4401,7 +4408,7 @@
   } else {
     CHECK(!klass->IsResolved());
     // Retire the temporary class and create the correctly sized resolved class.
-    *new_class = klass->CopyOf(self, class_size);
+    *new_class = klass->CopyOf(self, class_size, &imt_handle_scope);
     if (UNLIKELY(*new_class == nullptr)) {
       CHECK(self->IsExceptionPending());  // Expect an OOME.
       klass->SetStatus(mirror::Class::kStatusError, self);
@@ -4535,7 +4542,8 @@
 
 // Populate the class vtable and itable. Compute return type indices.
 bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass,
-                              Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
+                              Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+                              StackHandleScope<mirror::Class::kImtSize>* out_imt) {
   if (klass->IsInterface()) {
     // No vtable.
     size_t count = klass->NumVirtualMethods();
@@ -4546,22 +4554,19 @@
     for (size_t i = 0; i < count; ++i) {
       klass->GetVirtualMethodDuringLinking(i)->SetMethodIndex(i);
     }
-    // Link interface method tables
-    return LinkInterfaceMethods(klass, interfaces);
-  } else {
-    // Link virtual and interface method tables
-    return LinkVirtualMethods(self, klass) && LinkInterfaceMethods(klass, interfaces);
+  } else if (!LinkVirtualMethods(self, klass)) {  // Link virtual methods first.
+    return false;
   }
-  return true;
+  return LinkInterfaceMethods(self, klass, interfaces, out_imt);  // Link interface methods last.
 }
 
 bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) {
+  const size_t num_virtual_methods = klass->NumVirtualMethods();
   if (klass->HasSuperClass()) {
-    uint32_t max_count = klass->NumVirtualMethods() +
-        klass->GetSuperClass()->GetVTableLength();
-    size_t actual_count = klass->GetSuperClass()->GetVTableLength();
-    CHECK_LE(actual_count, max_count);
-    StackHandleScope<4> hs(self);
+    const size_t super_vtable_length = klass->GetSuperClass()->GetVTableLength();
+    const size_t max_count = num_virtual_methods + super_vtable_length;
+    size_t actual_count = super_vtable_length;
+    StackHandleScope<2> hs(self);
     Handle<mirror::Class> super_class(hs.NewHandle(klass->GetSuperClass()));
     Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable;
     if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
@@ -4570,9 +4575,8 @@
         CHECK(self->IsExceptionPending());  // OOME.
         return false;
       }
-      int len = super_class->GetVTableLength();
-      for (int i = 0; i < len; i++) {
-        vtable->Set<false>(i, super_class->GetVTableEntry(i));
+      for (size_t i = 0; i < super_vtable_length; i++) {
+        vtable->SetWithoutChecks<false>(i, super_class->GetEmbeddedVTableEntry(i));
       }
     } else {
       CHECK(super_class->GetVTable() != nullptr) << PrettyClass(super_class.Get());
@@ -4584,16 +4588,14 @@
     }
 
     // See if any of our virtual methods override the superclass.
-    MethodHelper local_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
-    MethodHelper super_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
-    for (size_t i = 0; i < klass->NumVirtualMethods(); ++i) {
+    for (size_t i = 0; i < num_virtual_methods; ++i) {
       mirror::ArtMethod* local_method = klass->GetVirtualMethodDuringLinking(i);
-      local_mh.ChangeMethod(local_method);
+      MethodProtoHelper local_helper(local_method);
       size_t j = 0;
       for (; j < actual_count; ++j) {
-        mirror::ArtMethod* super_method = vtable->Get(j);
-        super_mh.ChangeMethod(super_method);
-        if (local_mh.HasSameNameAndSignature(&super_mh)) {
+        mirror::ArtMethod* super_method = vtable->GetWithoutChecks(j);
+        MethodProtoHelper super_helper(super_method);
+        if (local_helper.HasSameNameAndSignature(super_helper)) {
           if (klass->CanAccessMember(super_method->GetDeclaringClass(),
                                      super_method->GetAccessFlags())) {
             if (super_method->IsFinal()) {
@@ -4602,21 +4604,20 @@
                                 super_method->GetDeclaringClassDescriptor());
               return false;
             }
-            vtable->Set<false>(j, local_method);
+            vtable->SetWithoutChecks<false>(j, local_method);
             local_method->SetMethodIndex(j);
             break;
-          } else {
-            LOG(WARNING) << "Before Android 4.1, method " << PrettyMethod(local_method)
-                         << " would have incorrectly overridden the package-private method in "
-                         << PrettyDescriptor(super_method->GetDeclaringClassDescriptor());
           }
+          LOG(WARNING) << "Before Android 4.1, method " << PrettyMethod(local_method)
+                       << " would have incorrectly overridden the package-private method in "
+                       << PrettyDescriptor(super_method->GetDeclaringClassDescriptor());
         }
       }
       if (j == actual_count) {
         // Not overriding, append.
-        vtable->Set<false>(actual_count, local_method);
+        vtable->SetWithoutChecks<false>(actual_count, local_method);
         local_method->SetMethodIndex(actual_count);
-        actual_count += 1;
+        ++actual_count;
       }
     }
     if (!IsUint(16, actual_count)) {
@@ -4635,72 +4636,72 @@
     klass->SetVTable(vtable.Get());
   } else {
     CHECK_EQ(klass.Get(), GetClassRoot(kJavaLangObject));
-    uint32_t num_virtual_methods = klass->NumVirtualMethods();
     if (!IsUint(16, num_virtual_methods)) {
-      ThrowClassFormatError(klass.Get(), "Too many methods: %d", num_virtual_methods);
+      ThrowClassFormatError(klass.Get(), "Too many methods: %d",
+                            static_cast<int>(num_virtual_methods));
       return false;
     }
-    StackHandleScope<1> hs(self);
-    Handle<mirror::ObjectArray<mirror::ArtMethod>>
-        vtable(hs.NewHandle(AllocArtMethodArray(self, num_virtual_methods)));
-    if (UNLIKELY(vtable.Get() == nullptr)) {
+    mirror::ObjectArray<mirror::ArtMethod>* vtable = AllocArtMethodArray(self, num_virtual_methods);
+    if (UNLIKELY(vtable == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
       return false;
     }
     for (size_t i = 0; i < num_virtual_methods; ++i) {
       mirror::ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking(i);
-      vtable->Set<false>(i, virtual_method);
+      vtable->SetWithoutChecks<false>(i, virtual_method);
       virtual_method->SetMethodIndex(i & 0xFFFF);
     }
-    klass->SetVTable(vtable.Get());
+    klass->SetVTable(vtable);
   }
   return true;
 }
 
-bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
-                                       Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
-  Thread* const self = Thread::Current();
+bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass,
+                                       Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+                                       StackHandleScope<mirror::Class::kImtSize>* out_imt) {
+  StackHandleScope<2> hs(self);
   Runtime* const runtime = Runtime::Current();
-  // Set the imt table to be all conflicts by default.
-  klass->SetImTable(runtime->GetDefaultImt());
-  size_t super_ifcount;
-  if (klass->HasSuperClass()) {
-    super_ifcount = klass->GetSuperClass()->GetIfTableCount();
-  } else {
-    super_ifcount = 0;
-  }
-  uint32_t num_interfaces =
-      interfaces.Get() == nullptr ? klass->NumDirectInterfaces() : interfaces->GetLength();
-  size_t ifcount = super_ifcount + num_interfaces;
-  for (size_t i = 0; i < num_interfaces; i++) {
-    mirror::Class* interface =
-        interfaces.Get() == nullptr ? mirror::Class::GetDirectInterface(self, klass, i) :
-            interfaces->Get(i);
-    ifcount += interface->GetIfTableCount();
-  }
-  if (ifcount == 0) {
-    // Class implements no interfaces.
-    DCHECK_EQ(klass->GetIfTableCount(), 0);
-    DCHECK(klass->GetIfTable() == nullptr);
-    return true;
-  }
-  if (ifcount == super_ifcount) {
+  const bool has_superclass = klass->HasSuperClass();
+  const size_t super_ifcount = has_superclass ? klass->GetSuperClass()->GetIfTableCount() : 0U;
+  const bool have_interfaces = interfaces.Get() != nullptr;
+  const size_t num_interfaces =
+      have_interfaces ? interfaces->GetLength() : klass->NumDirectInterfaces();
+  if (num_interfaces == 0) {
+    if (super_ifcount == 0) {
+      // Class implements no interfaces.
+      DCHECK_EQ(klass->GetIfTableCount(), 0);
+      DCHECK(klass->GetIfTable() == nullptr);
+      return true;
+    }
     // Class implements same interfaces as parent, are any of these not marker interfaces?
     bool has_non_marker_interface = false;
     mirror::IfTable* super_iftable = klass->GetSuperClass()->GetIfTable();
-    for (size_t i = 0; i < ifcount; ++i) {
+    for (size_t i = 0; i < super_ifcount; ++i) {
       if (super_iftable->GetMethodArrayCount(i) > 0) {
         has_non_marker_interface = true;
         break;
       }
     }
+    // The class just inherits marker interfaces from its parent, so recycle the parent's iftable.
     if (!has_non_marker_interface) {
-      // Class just inherits marker interfaces from parent so recycle parent's iftable.
       klass->SetIfTable(super_iftable);
       return true;
     }
   }
-  StackHandleScope<4> hs(self);
+  size_t ifcount = super_ifcount + num_interfaces;
+  for (size_t i = 0; i < num_interfaces; i++) {
+    mirror::Class* interface = have_interfaces ?
+        interfaces->GetWithoutChecks(i) : mirror::Class::GetDirectInterface(self, klass, i);
+    DCHECK(interface != nullptr);
+    if (UNLIKELY(!interface->IsInterface())) {
+      std::string temp;
+      ThrowIncompatibleClassChangeError(klass.Get(), "Class %s implements non-interface class %s",
+                                        PrettyDescriptor(klass.Get()).c_str(),
+                                        PrettyDescriptor(interface->GetDescriptor(&temp)).c_str());
+      return false;
+    }
+    ifcount += interface->GetIfTableCount();
+  }
   Handle<mirror::IfTable> iftable(hs.NewHandle(AllocIfTable(self, ifcount)));
   if (UNLIKELY(iftable.Get() == nullptr)) {
     CHECK(self->IsExceptionPending());  // OOME.
@@ -4716,17 +4717,8 @@
   // Flatten the interface inheritance hierarchy.
   size_t idx = super_ifcount;
   for (size_t i = 0; i < num_interfaces; i++) {
-    mirror::Class* interface =
-        interfaces.Get() == nullptr ? mirror::Class::GetDirectInterface(self, klass, i) :
-            interfaces->Get(i);
-    DCHECK(interface != nullptr);
-    if (!interface->IsInterface()) {
-      std::string temp;
-      ThrowIncompatibleClassChangeError(klass.Get(), "Class %s implements non-interface class %s",
-                                        PrettyDescriptor(klass.Get()).c_str(),
-                                        PrettyDescriptor(interface->GetDescriptor(&temp)).c_str());
-      return false;
-    }
+    mirror::Class* interface = have_interfaces ? interfaces->Get(i) :
+        mirror::Class::GetDirectInterface(self, klass, i);
     // Check if interface is already in iftable
     bool duplicate = false;
     for (size_t j = 0; j < idx; j++) {
@@ -4758,6 +4750,7 @@
   }
   // Shrink iftable in case duplicates were found
   if (idx < ifcount) {
+    DCHECK_NE(num_interfaces, 0U);
     iftable.Assign(down_cast<mirror::IfTable*>(iftable->CopyOf(self, idx * mirror::IfTable::kMax)));
     if (UNLIKELY(iftable.Get() == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
@@ -4765,41 +4758,93 @@
     }
     ifcount = idx;
   } else {
-    CHECK_EQ(idx, ifcount);
+    DCHECK_EQ(idx, ifcount);
   }
   klass->SetIfTable(iftable.Get());
-
   // If we're an interface, we don't need the vtable pointers, so we're done.
   if (klass->IsInterface()) {
     return true;
   }
-  // Allocate imtable
-  bool imtable_changed = false;
-  Handle<mirror::ObjectArray<mirror::ArtMethod>> imtable(
-      hs.NewHandle(AllocArtMethodArray(self, mirror::Class::kImtSize)));
-  if (UNLIKELY(imtable.Get() == nullptr)) {
-    CHECK(self->IsExceptionPending());  // OOME.
-    return false;
-  }
-  MethodHelper interface_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
-  MethodHelper vtable_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
+  Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
+      hs.NewHandle(klass->GetVTableDuringLinking()));
   std::vector<mirror::ArtMethod*> miranda_list;
+  // Copy the IMT from the super class if possible.
+  bool extend_super_iftable = false;
+  if (has_superclass) {
+    mirror::Class* super_class = klass->GetSuperClass();
+    extend_super_iftable = true;
+    if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
+      for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+        out_imt->SetReference(i, super_class->GetEmbeddedImTableEntry(i));
+      }
+    } else {
+      // No imt in the super class; we need to reconstruct it from the iftable.
+      mirror::IfTable* if_table = super_class->GetIfTable();
+      mirror::ArtMethod* conflict_method = runtime->GetImtConflictMethod();
+      const size_t length = super_class->GetIfTableCount();
+      for (size_t i = 0; i < length; ++i) {
+        mirror::Class* interface = iftable->GetInterface(i);
+        const size_t num_virtuals = interface->NumVirtualMethods();
+        const size_t method_array_count = if_table->GetMethodArrayCount(i);
+        DCHECK_EQ(num_virtuals, method_array_count);
+        if (method_array_count == 0) {
+          continue;
+        }
+        mirror::ObjectArray<mirror::ArtMethod>* method_array = if_table->GetMethodArray(i);
+        for (size_t j = 0; j < num_virtuals; ++j) {
+          mirror::ArtMethod* method = method_array->GetWithoutChecks(j);
+          if (method->IsMiranda()) {
+            continue;
+          }
+          mirror::ArtMethod* interface_method = interface->GetVirtualMethod(j);
+          uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
+          mirror::ArtMethod* imt_ref = out_imt->GetReference(imt_index)->AsArtMethod();
+          if (imt_ref == runtime->GetImtUnimplementedMethod()) {
+            out_imt->SetReference(imt_index, method);
+          } else if (imt_ref != conflict_method) {
+            out_imt->SetReference(imt_index, conflict_method);
+          }
+        }
+      }
+    }
+  }
   for (size_t i = 0; i < ifcount; ++i) {
     size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
     if (num_methods > 0) {
       StackHandleScope<2> hs(self);
-      Handle<mirror::ObjectArray<mirror::ArtMethod>>
-          method_array(hs.NewHandle(AllocArtMethodArray(self, num_methods)));
+      const bool is_super = i < super_ifcount;
+      const bool super_interface = is_super && extend_super_iftable;
+      Handle<mirror::ObjectArray<mirror::ArtMethod>> method_array;
+      Handle<mirror::ObjectArray<mirror::ArtMethod>> input_array;
+      if (super_interface) {
+        mirror::IfTable* if_table = klass->GetSuperClass()->GetIfTable();
+        DCHECK(if_table != nullptr);
+        DCHECK(if_table->GetMethodArray(i) != nullptr);
+        // If we are working on a super interface, try extending the existing method array.
+        method_array = hs.NewHandle(if_table->GetMethodArray(i)->Clone(self)->
+            AsObjectArray<mirror::ArtMethod>());
+        // We are overwriting a superclass interface, so we only need to search this class's own
+        // virtual methods instead of the whole vtable.
+        input_array = hs.NewHandle(klass->GetVirtualMethods());
+      } else {
+        method_array = hs.NewHandle(AllocArtMethodArray(self, num_methods));
+        // A new interface: we need the whole vtable in case the interface method is
+        // implemented somewhere in the superclass hierarchy.
+        input_array = vtable;
+      }
       if (UNLIKELY(method_array.Get() == nullptr)) {
         CHECK(self->IsExceptionPending());  // OOME.
         return false;
       }
       iftable->SetMethodArray(i, method_array.Get());
-      Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
-          hs.NewHandle(klass->GetVTableDuringLinking()));
+      if (input_array.Get() == nullptr) {
+        // The class declares no virtual methods of its own, so there is nothing to add.
+        DCHECK(super_interface);
+        continue;
+      }
       for (size_t j = 0; j < num_methods; ++j) {
         mirror::ArtMethod* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j);
-        interface_mh.ChangeMethod(interface_method);
+        MethodProtoHelper interface_helper(interface_method);
         int32_t k;
         // For each method listed in the interface's method list, find the
         // matching method in our class's method list.  We want to favor the
@@ -4809,10 +4854,10 @@
         // it -- otherwise it would use the same vtable slot.  In .dex files
         // those don't end up in the virtual method table, so it shouldn't
         // matter which direction we go.  We walk it backward anyway.)
-        for (k = vtable->GetLength() - 1; k >= 0; --k) {
-          mirror::ArtMethod* vtable_method = vtable->Get(k);
-          vtable_mh.ChangeMethod(vtable_method);
-          if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
+        for (k = input_array->GetLength() - 1; k >= 0; --k) {
+          mirror::ArtMethod* vtable_method = input_array->GetWithoutChecks(k);
+          MethodProtoHelper vtable_helper(vtable_method);
+          if (interface_helper.HasSameNameAndSignature(vtable_helper)) {
             if (!vtable_method->IsAbstract() && !vtable_method->IsPublic()) {
               ThrowIllegalAccessError(
                   klass.Get(),
@@ -4821,53 +4866,49 @@
                   PrettyMethod(interface_method).c_str());
               return false;
             }
-            method_array->Set<false>(j, vtable_method);
+            method_array->SetWithoutChecks<false>(j, vtable_method);
             // Place method in imt if entry is empty, place conflict otherwise.
             uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
-            if (imtable->Get(imt_index) == nullptr) {
-              imtable->Set<false>(imt_index, vtable_method);
-              imtable_changed = true;
-            } else {
-              imtable->Set<false>(imt_index, runtime->GetImtConflictMethod());
+            mirror::ArtMethod* imt_ref = out_imt->GetReference(imt_index)->AsArtMethod();
+            mirror::ArtMethod* conflict_method = runtime->GetImtConflictMethod();
+            if (imt_ref == runtime->GetImtUnimplementedMethod()) {
+              out_imt->SetReference(imt_index, vtable_method);
+            } else if (imt_ref != conflict_method) {
+              // If the slot is not a conflict and the new method has the same name and signature
+              // as the existing imt entry, we must have overridden a superclass vtable entry.
+              if (MethodProtoHelper(imt_ref).HasSameNameAndSignature(vtable_helper)) {
+                out_imt->SetReference(imt_index, vtable_method);
+              } else {
+                out_imt->SetReference(imt_index, conflict_method);
+              }
             }
             break;
           }
         }
-        if (k < 0) {
-          StackHandleScope<1> hs(self);
-          auto miranda_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
+        if (k < 0 && !super_interface) {
+          mirror::ArtMethod* miranda_method = nullptr;
           for (mirror::ArtMethod* mir_method : miranda_list) {
-            vtable_mh.ChangeMethod(mir_method);
-            if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
-              miranda_method.Assign(mir_method);
+            MethodProtoHelper vtable_helper(mir_method);
+            if (interface_helper.HasSameNameAndSignature(vtable_helper)) {
+              miranda_method = mir_method;
               break;
             }
           }
-          if (miranda_method.Get() == nullptr) {
+          if (miranda_method == nullptr) {
             // Point the interface table at a phantom slot.
-            miranda_method.Assign(down_cast<mirror::ArtMethod*>(interface_method->Clone(self)));
-            if (UNLIKELY(miranda_method.Get() == nullptr)) {
+            miranda_method = down_cast<mirror::ArtMethod*>(interface_method->Clone(self));
+            if (UNLIKELY(miranda_method == nullptr)) {
               CHECK(self->IsExceptionPending());  // OOME.
               return false;
             }
             // TODO: If a methods move then the miranda_list may hold stale references.
-            miranda_list.push_back(miranda_method.Get());
+            miranda_list.push_back(miranda_method);
           }
-          method_array->Set<false>(j, miranda_method.Get());
+          method_array->SetWithoutChecks<false>(j, miranda_method);
         }
       }
     }
   }
-  if (imtable_changed) {
-    // Fill in empty entries in interface method table with conflict.
-    mirror::ArtMethod* imt_conflict_method = runtime->GetImtConflictMethod();
-    for (size_t i = 0; i < mirror::Class::kImtSize; i++) {
-      if (imtable->Get(i) == nullptr) {
-        imtable->Set<false>(i, imt_conflict_method);
-      }
-    }
-    klass->SetImTable(imtable.Get());
-  }
   if (!miranda_list.empty()) {
     int old_method_count = klass->NumVirtualMethods();
     int new_method_count = old_method_count + miranda_list.size();
@@ -4883,10 +4924,6 @@
     }
     klass->SetVirtualMethods(virtuals);
 
-    StackHandleScope<1> hs(self);
-    Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
-        hs.NewHandle(klass->GetVTableDuringLinking()));
-    CHECK(vtable.Get() != nullptr);
     int old_vtable_count = vtable->GetLength();
     int new_vtable_count = old_vtable_count + miranda_list.size();
     vtable.Assign(vtable->CopyOf(self, new_vtable_count));
@@ -4900,19 +4937,19 @@
       method->SetAccessFlags(method->GetAccessFlags() | kAccMiranda);
       method->SetMethodIndex(0xFFFF & (old_vtable_count + i));
       klass->SetVirtualMethod(old_method_count + i, method);
-      vtable->Set<false>(old_vtable_count + i, method);
+      vtable->SetWithoutChecks<false>(old_vtable_count + i, method);
     }
     // TODO: do not assign to the vtable field until it is fully constructed.
     klass->SetVTable(vtable.Get());
   }
 
-  mirror::ObjectArray<mirror::ArtMethod>* vtable = klass->GetVTableDuringLinking();
-  for (int i = 0; i < vtable->GetLength(); ++i) {
-    CHECK(vtable->Get(i) != nullptr);
+  if (kIsDebugBuild) {
+    mirror::ObjectArray<mirror::ArtMethod>* vtable = klass->GetVTableDuringLinking();
+    for (int i = 0; i < vtable->GetLength(); ++i) {
+      CHECK(vtable->GetWithoutChecks(i) != nullptr);
+    }
   }
 
-//  klass->DumpClass(std::cerr, Class::kDumpClassFullDetail);
-
   return true;
 }
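
Note on the IMT fill rule used in both loops above (the superclass reconstruction and the per-interface scan): a slot still holding the IMT-unimplemented sentinel takes the first implementation it sees, and a slot that already holds a different method degrades to the conflict method; the per-interface scan additionally lets a method with a matching name and signature replace the entry it overrode in the superclass vtable. A minimal sketch of the basic rule, with plain pointers standing in for mirror::ArtMethod and the two runtime sentinels (stand-in names, not ART API):

    struct Method {};
    static Method kUnimplemented;  // Stands in for Runtime::GetImtUnimplementedMethod().
    static Method kConflict;      // Stands in for Runtime::GetImtConflictMethod().

    // Apply one interface implementation to an IMT slot.
    static void UpdateImtSlot(Method** slot, Method* method) {
      if (*slot == &kUnimplemented) {
        *slot = method;       // First implementation claims the slot.
      } else if (*slot != &kConflict) {
        *slot = &kConflict;   // A second, different method marks the slot as a conflict.
      }
    }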
 
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 107a4b2..3afbac0 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -51,6 +51,7 @@
 template<class T> class ObjectLock;
 class ScopedObjectAccessAlreadyRunnable;
 template<class T> class Handle;
+template<size_t kNumReferences> class PACKED(4) StackHandleScope;
 
 typedef bool (ClassVisitor)(mirror::Class* c, void* arg);
 
@@ -513,14 +514,16 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool LinkMethods(Thread* self, Handle<mirror::Class> klass,
-                   Handle<mirror::ObjectArray<mirror::Class>> interfaces)
+                   Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+                   StackHandleScope<mirror::Class::kImtSize>* out_imt)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkInterfaceMethods(Handle<mirror::Class> klass,
-                            Handle<mirror::ObjectArray<mirror::Class>> interfaces)
+  bool LinkInterfaceMethods(Thread* const self, Handle<mirror::Class> klass,
+                            Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+                            StackHandleScope<mirror::Class::kImtSize>* out_imt)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool LinkStaticFields(Handle<mirror::Class> klass, size_t* class_size)
@@ -734,7 +737,8 @@
   };
   GcRoot<mirror::ObjectArray<mirror::Class>> class_roots_;
 
-  mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE mirror::Class* GetClassRoot(ClassRoot class_root)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetClassRoot(ClassRoot class_root, mirror::Class* klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index dc845c7..1f52c46 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2364,7 +2364,7 @@
 }
 
 void Dbg::ResumeVM() {
-  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
+  Runtime::Current()->GetThreadList()->ResumeAllForDebugger();
 }
 
 JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
diff --git a/runtime/debugger.h b/runtime/debugger.h
index eaab1f4..131de2c 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -246,7 +246,9 @@
    */
   static int64_t LastDebuggerActivity();
 
-  static void UndoDebuggerSuspensions();
+  static void UndoDebuggerSuspensions()
+    LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                   Locks::thread_suspend_count_lock_);
 
   /*
    * Class, Object, Array
@@ -459,7 +461,9 @@
   static void SuspendVM()
       LOCKS_EXCLUDED(Locks::thread_list_lock_,
                      Locks::thread_suspend_count_lock_);
-  static void ResumeVM();
+  static void ResumeVM()
+      LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);
   static JDWP::JdwpError SuspendThread(JDWP::ObjectId thread_id, bool request_suspension = true)
       LOCKS_EXCLUDED(Locks::mutator_lock_,
                      Locks::thread_list_lock_,
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index ccbedc0..7aee01d 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -429,7 +429,14 @@
     case kInterface: {
       uint32_t imt_index = resolved_method->GetDexMethodIndex() % mirror::Class::kImtSize;
       mirror::ArtMethod* imt_method = (*this_object)->GetClass()->GetEmbeddedImTableEntry(imt_index);
-      if (!imt_method->IsImtConflictMethod()) {
+      if (!imt_method->IsImtConflictMethod() && !imt_method->IsImtUnimplementedMethod()) {
+        if (kIsDebugBuild) {
+          mirror::Class* klass = (*this_object)->GetClass();
+          mirror::ArtMethod* method = klass->FindVirtualMethodForInterface(resolved_method);
+          CHECK_EQ(imt_method, method) << PrettyMethod(resolved_method) << " / " <<
+              PrettyMethod(imt_method) << " / " << PrettyMethod(method) << " / " <<
+              PrettyClass(klass);
+        }
         return imt_method;
       } else {
         mirror::ArtMethod* interface_method =
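
For context, the fast path above hashes the resolved method's dex method index into the embedded IMT and trusts the slot only when it holds neither sentinel; both the conflict and the unimplemented method fall through to the full interface lookup. A simplified, self-contained sketch of that decision (stand-in types; the table size and SlowPathLookup are assumptions standing in for mirror::Class::kImtSize and FindVirtualMethodForInterface):

    #include <cstddef>
    #include <cstdint>

    struct Method { uint32_t dex_method_index; };

    constexpr size_t kImtSize = 64;  // Assumed table size.
    Method* imt[kImtSize];
    Method* imt_conflict_method;
    Method* imt_unimplemented_method;
    Method* SlowPathLookup(Method* resolved);  // Placeholder for the full search.

    Method* FindInterfaceTarget(Method* resolved) {
      size_t imt_index = resolved->dex_method_index % kImtSize;
      Method* imt_method = imt[imt_index];
      if (imt_method != imt_conflict_method && imt_method != imt_unimplemented_method) {
        return imt_method;  // The IMT caches the unique implementation.
      }
      return SlowPathLookup(resolved);  // Conflicting or unimplemented slot.
    }
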
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index fbeea85..6d44d89 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -54,9 +54,8 @@
   static CardTable* Create(const byte* heap_begin, size_t heap_capacity);
 
   // Set the card associated with the given address to GC_CARD_DIRTY.
-  void MarkCard(const void *addr) {
-    byte* card_addr = CardFromAddr(addr);
-    *card_addr = kCardDirty;
+  ALWAYS_INLINE void MarkCard(const void *addr) {
+    *CardFromAddr(addr) = kCardDirty;
   }
 
   // Is the object on a dirty card?
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 6af98cf..6645303 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2153,6 +2153,13 @@
   } else {
     LOG(FATAL) << "Invalid current allocator " << current_allocator_;
   }
+  if (IsGcConcurrent()) {
+    // Disable the concurrent GC check so that we don't issue spammy JNI GC requests.
+    // This gets recalculated in GrowForUtilization. It is important that it is disabled and
+    // recalculated in the same thread so that there aren't any races that can cause it to become
+    // permanently disabled. b/17942071
+    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
+  }
   CHECK(collector != nullptr)
       << "Could not find garbage collector with collector_type="
       << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
@@ -2960,9 +2967,6 @@
       self->IsHandlingStackOverflow()) {
     return;
   }
-  // We already have a request pending, no reason to start more until we update
-  // concurrent_start_bytes_.
-  concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
   JNIEnv* env = self->GetJniEnv();
   DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
   DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index cf297bd..24f4f17 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -372,18 +372,18 @@
 
   // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
   // The call is not needed if NULL is stored in the field.
-  void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
-                         const mirror::Object* /*new_value*/) {
+  ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
+                                       const mirror::Object* /*new_value*/) {
     card_table_->MarkCard(dst);
   }
 
   // Write barrier for array operations that update many field positions
-  void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
-                         size_t /*length TODO: element_count or byte_count?*/) {
+  ALWAYS_INLINE void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
+                                       size_t /*length TODO: element_count or byte_count?*/) {
     card_table_->MarkCard(dst);
   }
 
-  void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
+  ALWAYS_INLINE void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
     card_table_->MarkCard(obj);
   }
 
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 59630fe..2aa4ad1 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -629,6 +629,9 @@
   runtime->SetResolutionMethod(down_cast<mirror::ArtMethod*>(resolution_method));
   mirror::Object* imt_conflict_method = image_header.GetImageRoot(ImageHeader::kImtConflictMethod);
   runtime->SetImtConflictMethod(down_cast<mirror::ArtMethod*>(imt_conflict_method));
+  mirror::Object* imt_unimplemented_method =
+      image_header.GetImageRoot(ImageHeader::kImtUnimplementedMethod);
+  runtime->SetImtUnimplementedMethod(down_cast<mirror::ArtMethod*>(imt_unimplemented_method));
   mirror::Object* default_imt = image_header.GetImageRoot(ImageHeader::kDefaultImt);
   runtime->SetDefaultImt(down_cast<mirror::ObjectArray<mirror::ArtMethod>*>(default_imt));
 
diff --git a/runtime/handle.h b/runtime/handle.h
index f70faf4..f9864dc 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -58,7 +58,7 @@
   }
 
   ALWAYS_INLINE T* Get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return reference_->AsMirrorPtr();
+    return down_cast<T*>(reference_->AsMirrorPtr());
   }
 
   ALWAYS_INLINE jobject ToJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -70,25 +70,25 @@
   }
 
  protected:
-  StackReference<T>* reference_;
-
   template<typename S>
   explicit ConstHandle(StackReference<S>* reference)
-      : reference_(reinterpret_cast<StackReference<T>*>(reference)) {
+      : reference_(reference) {
   }
   template<typename S>
   explicit ConstHandle(const ConstHandle<S>& handle)
-      : reference_(reinterpret_cast<StackReference<T>*>(handle.reference_)) {
+      : reference_(handle.reference_) {
   }
 
-  StackReference<T>* GetReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+  StackReference<mirror::Object>* GetReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
     return reference_;
   }
-  ALWAYS_INLINE const StackReference<T>* GetReference() const
+  ALWAYS_INLINE const StackReference<mirror::Object>* GetReference() const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return reference_;
   }
 
+  StackReference<mirror::Object>* reference_;
+
  private:
   friend class BuildGenericJniFrameVisitor;
   template<class S> friend class ConstHandle;
@@ -120,8 +120,8 @@
   }
 
   ALWAYS_INLINE T* Assign(T* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    StackReference<T>* ref = ConstHandle<T>::GetReference();
-    T* const old = ref->AsMirrorPtr();
+    StackReference<mirror::Object>* ref = Handle<T>::GetReference();
+    T* old = down_cast<T*>(ref->AsMirrorPtr());
     ref->Assign(reference);
     return old;
   }
@@ -131,7 +131,6 @@
       : ConstHandle<T>(handle) {
   }
 
- protected:
   template<typename S>
   explicit Handle(StackReference<S>* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       : ConstHandle<T>(reference) {
@@ -152,7 +151,7 @@
   }
 
  private:
-  StackReference<T> null_ref_;
+  StackReference<mirror::Object> null_ref_;
 };
 
 }  // namespace art
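
The reshaped ConstHandle/Handle above stores a single untyped StackReference<mirror::Object> and reapplies the static type in Get() and Assign() via down_cast, so every handle aliases the same storage type. A minimal model of that layout (stand-in types; static_cast where ART uses down_cast):

    struct Object {};

    template <typename T>
    struct StackReference {
      T* AsMirrorPtr() const { return ptr_; }
      void Assign(T* ptr) { ptr_ = ptr; }
      T* ptr_;
    };

    // All instantiations share one reference type, letting a scope manage a
    // homogeneous StackReference<Object> array regardless of each handle's T.
    template <typename T>
    class Handle {
     public:
      explicit Handle(StackReference<Object>* reference) : reference_(reference) {}
      T* Get() const { return static_cast<T*>(reference_->AsMirrorPtr()); }
     private:
      StackReference<Object>* reference_;
    };
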
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index 7bc811d..da28ed7 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -25,12 +25,12 @@
 namespace art {
 
 template<size_t kNumReferences>
-inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self)
+inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self, mirror::Object* fill_value)
     : HandleScope(kNumReferences), self_(self), pos_(0) {
   // TODO: Figure out how to use a compile assert.
   DCHECK_EQ(&references_[0], &references_storage_[0]);
   for (size_t i = 0; i < kNumReferences; ++i) {
-    SetReference(i, nullptr);
+    SetReference(i, fill_value);
   }
   self_->PushHandleScope(this);
 }
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 42ef779..5050872 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -120,6 +120,12 @@
   }
 
  protected:
+  // Return backing storage used for references.
+  ALWAYS_INLINE StackReference<mirror::Object>* GetReferences() const {
+    uintptr_t address = reinterpret_cast<uintptr_t>(this) + ReferencesOffset(sizeof(void*));
+    return reinterpret_cast<StackReference<mirror::Object>*>(address);
+  }
+
   explicit HandleScope(size_t number_of_references) :
       link_(nullptr), number_of_references_(number_of_references) {
   }
@@ -150,56 +156,48 @@
   }
 
  private:
-  T** obj_;
+  T** const obj_;
 };
 
 // Scoped handle storage of a fixed size that is usually stack allocated.
 template<size_t kNumReferences>
 class PACKED(4) StackHandleScope FINAL : public HandleScope {
  public:
-  explicit StackHandleScope(Thread* self);
-  ~StackHandleScope();
-
-  // Currently unused, using this GetReference instead of the one in HandleScope is preferred to
-  // avoid compiler optimizations incorrectly optimizing out of bound array accesses.
-  // TODO: Remove this when it is un-necessary.
-  mirror::Object* GetReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      ALWAYS_INLINE {
-    DCHECK_LT(i, number_of_references_);
-    return references_storage_[i].AsMirrorPtr();
-  }
-
-  Handle<mirror::Object> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      ALWAYS_INLINE {
-    DCHECK_LT(i, number_of_references_);
-    return Handle<mirror::Object>(&references_storage_[i]);
-  }
-
-  void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      ALWAYS_INLINE {
-    DCHECK_LT(i, number_of_references_);
-    references_storage_[i].Assign(object);
-  }
+  explicit ALWAYS_INLINE StackHandleScope(Thread* self, mirror::Object* fill_value = nullptr);
+  ALWAYS_INLINE ~StackHandleScope();
 
   template<class T>
-  Handle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE Handle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     SetReference(pos_, object);
-    Handle<T> h(GetHandle(pos_));
+    Handle<T> h(GetHandle<T>(pos_));
     pos_++;
     return h;
   }
 
   template<class T>
-  HandleWrapper<T> NewHandleWrapper(T** object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE HandleWrapper<T> NewHandleWrapper(T** object)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     SetReference(pos_, *object);
-    Handle<T> h(GetHandle(pos_));
+    Handle<T> h(GetHandle<T>(pos_));
     pos_++;
     return HandleWrapper<T>(object, h);
   }
 
+  ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK_LT(i, kNumReferences);
+    GetReferences()[i].Assign(object);
+  }
+
  private:
-  // References_storage_ needs to be first so that it appears in the same location as
-  // HandleScope::references_.
+  template<class T>
+  ALWAYS_INLINE Handle<T> GetHandle(size_t i)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK_LT(i, kNumReferences);
+    return Handle<T>(&GetReferences()[i]);
+  }
+
+  // Reference storage needs to be first as expected by the HandleScope layout.
   StackReference<mirror::Object> references_storage_[kNumReferences];
 
   // The thread that the stack handle scope is a linked list upon. The stack handle scope will
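
The new fill_value parameter is what lets class linking seed an IMT-sized scope with the unimplemented sentinel instead of null, as in the class_linker.cc hunk above:

    StackHandleScope<mirror::Class::kImtSize> imt_handle_scope(
        self, Runtime::Current()->GetImtUnimplementedMethod());
    // Individual slots are then overwritten as implementations are found:
    imt_handle_scope.SetReference(imt_index, vtable_method);
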
diff --git a/runtime/image.cc b/runtime/image.cc
index 478b486..f451df9 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
 namespace art {
 
 const byte ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const byte ImageHeader::kImageVersion[] = { '0', '0', '9', '\0' };
+const byte ImageHeader::kImageVersion[] = { '0', '1', '0', '\0' };
 
 ImageHeader::ImageHeader(uint32_t image_begin,
                          uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index 424a40b..e7f5552 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -105,6 +105,7 @@
   enum ImageRoot {
     kResolutionMethod,
     kImtConflictMethod,
+    kImtUnimplementedMethod,
     kDefaultImt,
     kCalleeSaveMethod,
     kRefsOnlySaveMethod,
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index e49a408..10a017c 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -181,8 +181,14 @@
     for (int i = 0; i < pEvent->modCount; i++) {
       const JdwpEventMod* pMod = &pEvent->mods[i];
       if (pMod->modKind == MK_LOCATION_ONLY) {
-        /* should only be for Breakpoint, Step, and Exception */
-        Dbg::WatchLocation(&pMod->locationOnly.loc, &req);
+        // Should only concern breakpoint, field access, field modification, step, and exception
+        // events.
+        // However, only breakpoints require specific handling here: field access, field
+        // modification and step events need full deoptimization to be reported, while exception
+        // events are reported during exception handling.
+        if (pEvent->eventKind == EK_BREAKPOINT) {
+          Dbg::WatchLocation(&pMod->locationOnly.loc, &req);
+        }
       } else if (pMod->modKind == MK_STEP) {
         /* should only be for EK_SINGLE_STEP; should only be one */
         JdwpStepSize size = static_cast<JdwpStepSize>(pMod->step.size);
@@ -258,8 +264,10 @@
     for (int i = 0; i < pEvent->modCount; i++) {
       JdwpEventMod* pMod = &pEvent->mods[i];
       if (pMod->modKind == MK_LOCATION_ONLY) {
-        /* should only be for Breakpoint, Step, and Exception */
-        Dbg::UnwatchLocation(&pMod->locationOnly.loc, &req);
+        // As in RegisterEvent, only breakpoints need specific handling.
+        if (pEvent->eventKind == EK_BREAKPOINT) {
+          Dbg::UnwatchLocation(&pMod->locationOnly.loc, &req);
+        }
       }
       if (pMod->modKind == MK_STEP) {
         /* should only be for EK_SINGLE_STEP; should only be one */
diff --git a/runtime/method_helper-inl.h b/runtime/method_helper-inl.h
index 9af835f..e680307 100644
--- a/runtime/method_helper-inl.h
+++ b/runtime/method_helper-inl.h
@@ -74,6 +74,23 @@
   return s;
 }
 
+inline MethodProtoHelper::MethodProtoHelper(mirror::ArtMethod* method) {
+  method = method->GetInterfaceMethodIfProxy();
+  dex_file_ = method->GetDexFile();
+  mid_ = &dex_file_->GetMethodId(method->GetDexMethodIndex());
+  name_ = dex_file_->StringDataAndUtf16LengthByIdx(mid_->name_idx_, &name_len_);
+}
+
+inline bool MethodProtoHelper::HasSameNameAndSignature(const MethodProtoHelper& other) const {
+  if (name_len_ != other.name_len_ || strcmp(name_, other.name_) != 0) {
+    return false;
+  }
+  if (dex_file_ == other.dex_file_) {
+    return mid_->name_idx_ == other.mid_->name_idx_ && mid_->proto_idx_ == other.mid_->proto_idx_;
+  }
+  return dex_file_->GetMethodSignature(*mid_) == other.dex_file_->GetMethodSignature(*other.mid_);
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_METHOD_HELPER_INL_H_
diff --git a/runtime/method_helper.h b/runtime/method_helper.h
index f71d273..37295c0 100644
--- a/runtime/method_helper.h
+++ b/runtime/method_helper.h
@@ -24,6 +24,19 @@
 
 namespace art {
 
+class MethodProtoHelper {
+ public:
+  ALWAYS_INLINE MethodProtoHelper(mirror::ArtMethod* method)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE bool HasSameNameAndSignature(const MethodProtoHelper& other) const;
+
+ private:
+  const DexFile* dex_file_;
+  const DexFile::MethodId* mid_;
+  const char* name_;
+  uint32_t name_len_;
+};
+
 class MethodHelper {
  public:
   explicit MethodHelper(Handle<mirror::ArtMethod> m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
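
MethodProtoHelper replaces the handle-based MethodHelper pair in the linking loops: it caches the dex file, method id, and name length at construction, so repeated name-and-signature comparisons avoid re-reading that state. Usage, as in LinkVirtualMethods above:

    MethodProtoHelper local_helper(local_method);
    MethodProtoHelper super_helper(super_method);
    if (local_helper.HasSameNameAndSignature(super_helper)) {
      // local_method overrides super_method's vtable slot.
    }
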
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 7af88d6..75d8d91 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -50,7 +50,7 @@
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Array, length_));
   }
 
@@ -90,7 +90,7 @@
   // Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and
   // returns false.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool CheckIsValidIndex(int32_t index) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  protected:
   void ThrowArrayStoreException(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 9782dde..1af78f3 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -329,6 +329,14 @@
   return result;
 }
 
+
+inline bool ArtMethod::IsImtUnimplementedMethod() {
+  bool result = this == Runtime::Current()->GetImtUnimplementedMethod();
+  // If we do think it is the phony method, check that it at least looks like a runtime method.
+  DCHECK(!result || IsRuntimeMethod());
+  return result;
+}
+
 inline uintptr_t ArtMethod::NativePcOffset(const uintptr_t pc) {
   const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this);
   return pc - reinterpret_cast<uintptr_t>(code);
@@ -399,7 +407,7 @@
 }
 
 inline const DexFile* ArtMethod::GetDexFile() {
-  return GetInterfaceMethodIfProxy()->GetDeclaringClass()->GetDexCache()->GetDexFile();
+  return GetDexCache()->GetDexFile();
 }
 
 inline const char* ArtMethod::GetDeclaringClassDescriptor() {
@@ -531,6 +539,21 @@
   return interface_method;
 }
 
+inline void ArtMethod::SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings) {
+  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_strings_),
+                        new_dex_cache_strings);
+}
+
+inline void ArtMethod::SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods) {
+  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_),
+                        new_dex_cache_methods);
+}
+
+inline void ArtMethod::SetDexCacheResolvedTypes(ObjectArray<Class>* new_dex_cache_classes) {
+  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_),
+                        new_dex_cache_classes);
+}
+
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 27499c2..bf28287 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -89,21 +89,6 @@
   java_lang_reflect_ArtMethod_ = GcRoot<Class>(nullptr);
 }
 
-void ArtMethod::SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings) {
-  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_strings_),
-                        new_dex_cache_strings);
-}
-
-void ArtMethod::SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods) {
-  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_),
-                        new_dex_cache_methods);
-}
-
-void ArtMethod::SetDexCacheResolvedTypes(ObjectArray<Class>* new_dex_cache_classes) {
-  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_),
-                        new_dex_cache_classes);
-}
-
 size_t ArtMethod::NumArgRegisters(const StringPiece& shorty) {
   CHECK_LE(1, shorty.length());
   uint32_t num_registers = 0;
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index abfdd42..1a4ec30 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -64,7 +64,7 @@
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
   }
 
-  uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // Not called within a transaction.
@@ -193,7 +193,7 @@
   // Number of 32bit registers that would be required to hold all the arguments
   static size_t NumArgRegisters(const StringPiece& shorty);
 
-  uint32_t GetDexMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetDexMethodIndex(uint32_t new_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // Not called within a transaction.
@@ -216,11 +216,11 @@
     return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_);
   }
 
-  ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx)
+  ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method)
+  ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods)
+  ALWAYS_INLINE void SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool HasDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool HasSameDexCacheResolvedMethods(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -409,6 +409,8 @@
 
   bool IsImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  bool IsImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   uintptr_t NativePcOffset(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   uintptr_t NativePcOffset(const uintptr_t pc, const void* quick_entry_point)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -536,9 +538,11 @@
   static GcRoot<Class> java_lang_reflect_ArtMethod_;
 
  private:
-  ObjectArray<ArtMethod>* GetDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE ObjectArray<ArtMethod>* GetDexCacheResolvedMethods()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ObjectArray<Class>* GetDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE ObjectArray<Class>* GetDexCacheResolvedTypes()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   friend struct art::ArtMethodOffsets;  // for verifying offset information
   DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 8e44471..0d109e5 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -113,19 +113,19 @@
 template<VerifyObjectFlags kVerifyFlags>
 inline ArtMethod* Class::GetVirtualMethod(uint32_t i) {
   DCHECK(IsResolved<kVerifyFlags>() || IsErroneous<kVerifyFlags>());
-  return GetVirtualMethods()->Get(i);
+  return GetVirtualMethods()->GetWithoutChecks(i);
 }
 
 inline ArtMethod* Class::GetVirtualMethodDuringLinking(uint32_t i) {
   DCHECK(IsLoaded() || IsErroneous());
-  return GetVirtualMethods()->Get(i);
+  return GetVirtualMethods()->GetWithoutChecks(i);
 }
 
 inline void Class::SetVirtualMethod(uint32_t i, ArtMethod* f)  // TODO: uint16_t
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   ObjectArray<ArtMethod>* virtual_methods =
       GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_));
-  virtual_methods->Set<false>(i, f);
+  virtual_methods->SetWithoutChecks<false>(i, f);
 }
 
 inline ObjectArray<ArtMethod>* Class::GetVTable() {
@@ -142,14 +142,6 @@
   SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable);
 }
 
-inline ObjectArray<ArtMethod>* Class::GetImTable() {
-  return GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, imtable_));
-}
-
-inline void Class::SetImTable(ObjectArray<ArtMethod>* new_imtable) {
-  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, imtable_), new_imtable);
-}
-
 inline ArtMethod* Class::GetEmbeddedImTableEntry(uint32_t i) {
   uint32_t offset = EmbeddedImTableOffset().Uint32Value() + i * sizeof(ImTableEntry);
   return GetFieldObject<mirror::ArtMethod>(MemberOffset(offset));
@@ -158,7 +150,6 @@
 inline void Class::SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method) {
   uint32_t offset = EmbeddedImTableOffset().Uint32Value() + i * sizeof(ImTableEntry);
   SetFieldObject<false>(MemberOffset(offset), method);
-  CHECK(method == GetImTable()->Get(i));
 }
 
 inline bool Class::HasVTable() {
@@ -733,6 +724,24 @@
   }
 }
 
+inline uint32_t Class::NumDirectInterfaces() {
+  if (IsPrimitive()) {
+    return 0;
+  } else if (IsArrayClass()) {
+    return 2;
+  } else if (IsProxyClass()) {
+    mirror::ObjectArray<mirror::Class>* interfaces = GetInterfaces();
+    return interfaces != nullptr ? interfaces->GetLength() : 0;
+  } else {
+    const DexFile::TypeList* interfaces = GetInterfaceTypeList();
+    if (interfaces == nullptr) {
+      return 0;
+    } else {
+      return interfaces->Size();
+    }
+  }
+}
+
 }  // namespace mirror
 }  // namespace art
 
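Inlining NumDirectInterfaces() makes its four-way dispatch cheap at each call
site: primitives implement nothing, every array type directly implements
java.lang.Cloneable and java.io.Serializable (hence the constant 2), proxy
classes carry their interfaces in a runtime array, and ordinary classes read
the interface list from their dex file. A minimal standalone sketch of that
dispatch follows; Kind and FakeClass are illustrative stand-ins, not ART types.

// Standalone sketch (C++14) of the NumDirectInterfaces() dispatch above.
#include <cstdint>
#include <cstdio>
#include <vector>

enum class Kind { kPrimitive, kArray, kProxy, kNormal };

struct FakeClass {
  Kind kind;
  const std::vector<int>* proxy_interfaces = nullptr;    // proxy: runtime array
  const std::vector<int>* dex_interface_list = nullptr;  // normal: dex TypeList

  uint32_t NumDirectInterfaces() const {
    switch (kind) {
      case Kind::kPrimitive:
        return 0;  // primitive types implement nothing
      case Kind::kArray:
        return 2;  // every array implements Cloneable and Serializable
      case Kind::kProxy:
        return proxy_interfaces != nullptr
                   ? static_cast<uint32_t>(proxy_interfaces->size()) : 0;
      case Kind::kNormal:
        return dex_interface_list != nullptr
                   ? static_cast<uint32_t>(dex_interface_list->size()) : 0;
    }
    return 0;
  }
};

int main() {
  FakeClass array_class{Kind::kArray};
  std::printf("array: %u\n", array_class.NumDirectInterfaces());  // prints 2
}
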
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 5b8eb82..c173935 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -748,24 +748,6 @@
   return &GetDexFile().GetClassDef(class_def_idx);
 }
 
-uint32_t Class::NumDirectInterfaces() {
-  if (IsPrimitive()) {
-    return 0;
-  } else if (IsArrayClass()) {
-    return 2;
-  } else if (IsProxyClass()) {
-    mirror::ObjectArray<mirror::Class>* interfaces = GetInterfaces();
-    return interfaces != nullptr ? interfaces->GetLength() : 0;
-  } else {
-    const DexFile::TypeList* interfaces = GetInterfaceTypeList();
-    if (interfaces == nullptr) {
-      return 0;
-    } else {
-      return interfaces->Size();
-    }
-  }
-}
-
 uint16_t Class::GetDirectInterfaceTypeIdx(uint32_t idx) {
   DCHECK(!IsPrimitive());
   DCHECK(!IsArrayClass());
@@ -826,22 +808,21 @@
   return GetDexFile().GetInterfacesList(*class_def);
 }
 
-void Class::PopulateEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  ObjectArray<ArtMethod>* table = GetImTable();
-  if (table != nullptr) {
-    for (uint32_t i = 0; i < kImtSize; i++) {
-      SetEmbeddedImTableEntry(i, table->Get(i));
-    }
+void Class::PopulateEmbeddedImtAndVTable(StackHandleScope<kImtSize>* imt_handle_scope) {
+  for (uint32_t i = 0; i < kImtSize; i++) {
+    // All slots must already be filled; the class linker never passes null entries here.
+    mirror::Object* obj = imt_handle_scope->GetReference(i);
+    DCHECK(obj != nullptr);
+    SetEmbeddedImTableEntry(i, obj->AsArtMethod());
   }
 
-  table = GetVTableDuringLinking();
+  ObjectArray<ArtMethod>* table = GetVTableDuringLinking();
   CHECK(table != nullptr) << PrettyClass(this);
   SetEmbeddedVTableLength(table->GetLength());
   for (int32_t i = 0; i < table->GetLength(); i++) {
-    SetEmbeddedVTableEntry(i, table->Get(i));
+    SetEmbeddedVTableEntry(i, table->GetWithoutChecks(i));
   }
 
-  SetImTable(nullptr);
   // Keep java.lang.Object's vtable around, since it is easily reused by
   // array classes during their linking.
   if (!IsObjectClass()) {
@@ -853,9 +834,10 @@
 class CopyClassVisitor {
  public:
   explicit CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig,
-                            size_t new_length, size_t copy_bytes)
+                            size_t new_length, size_t copy_bytes,
+                            StackHandleScope<mirror::Class::kImtSize>* imt_handle_scope)
       : self_(self), orig_(orig), new_length_(new_length),
-        copy_bytes_(copy_bytes) {
+        copy_bytes_(copy_bytes), imt_handle_scope_(imt_handle_scope) {
   }
 
   void operator()(Object* obj, size_t usable_size) const
@@ -864,7 +846,7 @@
     mirror::Class* new_class_obj = obj->AsClass();
     mirror::Object::CopyObject(self_, new_class_obj, orig_->Get(), copy_bytes_);
     new_class_obj->SetStatus(Class::kStatusResolving, self_);
-    new_class_obj->PopulateEmbeddedImtAndVTable();
+    new_class_obj->PopulateEmbeddedImtAndVTable(imt_handle_scope_);
     new_class_obj->SetClassSize(new_length_);
   }
 
@@ -873,10 +855,12 @@
   Handle<mirror::Class>* const orig_;
   const size_t new_length_;
   const size_t copy_bytes_;
+  StackHandleScope<mirror::Class::kImtSize>* const imt_handle_scope_;
   DISALLOW_COPY_AND_ASSIGN(CopyClassVisitor);
 };
 
-Class* Class::CopyOf(Thread* self, int32_t new_length) {
+Class* Class::CopyOf(Thread* self, int32_t new_length,
+                     StackHandleScope<kImtSize>* imt_handle_scope) {
   DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class)));
   // We may get copied by a compacting GC.
   StackHandleScope<1> hs(self);
@@ -884,17 +868,15 @@
   gc::Heap* heap = Runtime::Current()->GetHeap();
   // The num_bytes (3rd param) is sizeof(Class) as opposed to SizeOf()
   // to skip copying the tail part that we will overwrite here.
-  CopyClassVisitor visitor(self, &h_this, new_length, sizeof(Class));
-
+  CopyClassVisitor visitor(self, &h_this, new_length, sizeof(Class), imt_handle_scope);
   mirror::Object* new_class =
       kMovingClasses
          ? heap->AllocObject<true>(self, java_lang_Class_.Read(), new_length, visitor)
          : heap->AllocNonMovableObject<true>(self, java_lang_Class_.Read(), new_length, visitor);
   if (UNLIKELY(new_class == nullptr)) {
     CHECK(self->IsExceptionPending());  // Expect an OOME.
-    return NULL;
+    return nullptr;
   }
-
   return new_class->AsClass();
 }
 
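The key change in CopyOf()/CopyClassVisitor is that the IMT entries no longer
live in a heap-allocated imtable_ read during the copy; they arrive pinned in a
StackHandleScope, so a moving collection triggered by the allocation can
relocate them safely before PopulateEmbeddedImtAndVTable() runs inside the
visitor. A simplified sketch of this "initialize inside the allocator" pattern;
FakeHeap, FakeObject, and FakeHandleScope are hypothetical stand-ins for the
real ART types.

// Sketch (C++14) of the visitor-at-allocation pattern used by Class::CopyOf.
#include <cstddef>
#include <cstdio>
#include <functional>

struct FakeObject { int payload = 0; };

// Stand-in for StackHandleScope<N>: pins N references for the GC's benefit.
template <size_t kNumReferences>
struct FakeHandleScope {
  FakeObject* refs[kNumReferences] = {};
  FakeObject* GetReference(size_t i) const { return refs[i]; }
};

struct FakeHeap {
  // The visitor runs before the new object is published, so it can finish
  // initializing the object while the allocator still owns it.
  FakeObject* AllocObject(size_t /*size*/,
                          const std::function<void(FakeObject*)>& visitor) {
    FakeObject* obj = new FakeObject();
    visitor(obj);
    return obj;
  }
};

int main() {
  FakeHandleScope<2> scope;
  FakeObject a{1}, b{2};
  scope.refs[0] = &a;
  scope.refs[1] = &b;

  FakeHeap heap;
  FakeObject* copy = heap.AllocObject(sizeof(FakeObject), [&scope](FakeObject* fresh) {
    // Populate the new object from the pinned references, mirroring
    // PopulateEmbeddedImtAndVTable(imt_handle_scope).
    fresh->payload = scope.GetReference(0)->payload + scope.GetReference(1)->payload;
  });
  std::printf("copy payload: %d\n", copy->payload);  // prints 3
  delete copy;
}
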
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 03a8563..2d536ad 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -67,6 +67,7 @@
 struct ClassOffsets;
 class Signature;
 class StringPiece;
+template<size_t kNumReferences> class PACKED(4) StackHandleScope;
 
 namespace mirror {
 
@@ -215,46 +216,46 @@
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns true if the class is an interface.
-  bool IsInterface() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsInterface() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccInterface) != 0;
   }
 
   // Returns true if the class is declared public.
-  bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccPublic) != 0;
   }
 
   // Returns true if the class is declared final.
-  bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
-  bool IsFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccClassIsFinalizable) != 0;
   }
 
-  void SetFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE void SetFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
     SetAccessFlags(flags | kAccClassIsFinalizable);
   }
 
   // Returns true if the class is abstract.
-  bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccAbstract) != 0;
   }
 
   // Returns true if the class is an annotation.
-  bool IsAnnotation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsAnnotation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccAnnotation) != 0;
   }
 
   // Returns true if the class is synthetic.
-  bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags() & kAccSynthetic) != 0;
   }
 
@@ -582,7 +583,7 @@
   // downcast would be necessary. Similarly for interfaces, a class that implements (or an interface
   // that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
   // to themselves. Classes for primitive types may not assign to each other.
-  inline bool IsAssignableFrom(Class* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(src != NULL);
     if (this == src) {
       // Can always assign to things of the same type.
@@ -599,7 +600,7 @@
     }
   }
 
-  Class* GetSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE Class* GetSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetSuperClass(Class *new_super_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // Super class is assigned once, except during class linker initialization.
@@ -638,12 +639,13 @@
 
   void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ObjectArray<ArtMethod>* GetDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE ObjectArray<ArtMethod>* GetDirectMethods()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetDirectMethods(ObjectArray<ArtMethod>* new_direct_methods)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ArtMethod* GetDirectMethod(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE ArtMethod* GetDirectMethod(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetDirectMethod(uint32_t i, ArtMethod* f)  // TODO: uint16_t
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -652,13 +654,14 @@
   uint32_t NumDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ObjectArray<ArtMethod>* GetVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE ObjectArray<ArtMethod>* GetVirtualMethods()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SetVirtualMethods(ObjectArray<ArtMethod>* new_virtual_methods)
+  ALWAYS_INLINE void SetVirtualMethods(ObjectArray<ArtMethod>* new_virtual_methods)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns the number of non-inherited virtual methods.
-  uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ArtMethod* GetVirtualMethod(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -668,9 +671,10 @@
   void SetVirtualMethod(uint32_t i, ArtMethod* f)  // TODO: uint16_t
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ObjectArray<ArtMethod>* GetVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE ObjectArray<ArtMethod>* GetVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ObjectArray<ArtMethod>* GetVTableDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE ObjectArray<ArtMethod>* GetVTableDuringLinking()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetVTable(ObjectArray<ArtMethod>* new_vtable)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -679,13 +683,6 @@
     return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
   }
 
-  void SetImTable(ObjectArray<ArtMethod>* new_imtable)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  static MemberOffset ImTableOffset() {
-    return OFFSET_OF_OBJECT_MEMBER(Class, imtable_);
-  }
-
   static MemberOffset EmbeddedImTableOffset() {
     return MemberOffset(sizeof(Class));
   }
@@ -695,7 +692,7 @@
   }
 
   static MemberOffset EmbeddedVTableOffset() {
-    return MemberOffset(sizeof(Class) + kImtSize * sizeof(mirror::Class::ImTableEntry) + sizeof(int32_t));
+    return MemberOffset(sizeof(Class) + kImtSize * sizeof(ImTableEntry) + sizeof(int32_t));
   }
 
   bool ShouldHaveEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -720,7 +717,8 @@
 
   void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void PopulateEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void PopulateEmbeddedImtAndVTable(StackHandleScope<kImtSize>* imt_handle_scope)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Given a method implemented by this class but potentially from a super class, return the
   // specific implementation method for this class.
@@ -788,11 +786,11 @@
 
   ArtMethod* FindClassInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  int32_t GetIfTableCount() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE int32_t GetIfTableCount() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  IfTable* GetIfTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE IfTable* GetIfTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SetIfTable(IfTable* new_iftable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get instance fields of the class (See also GetSFields).
   ObjectArray<ArtField>* GetIFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -968,7 +966,7 @@
 
   const DexFile::ClassDef* GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -987,7 +985,7 @@
   void AssertInitializedOrInitializingInThread(Thread* self)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  Class* CopyOf(Thread* self, int32_t new_length)
+  Class* CopyOf(Thread* self, int32_t new_length, StackHandleScope<kImtSize>* imt_handle_scope)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // For proxy class only.
@@ -1036,8 +1034,6 @@
 
   void CheckObjectAlloc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  ObjectArray<ArtMethod>* GetImTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   // defining class loader, or NULL for the "bootstrap" system loader
   HeapReference<ClassLoader> class_loader_;
 
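With the imtable_ field and its accessors gone, a class's IMT is only reachable
through the embedded copy, whose layout the offset helpers above encode:
kImtSize entries starting at sizeof(Class), then a 32-bit vtable length, then
the vtable entries. A worked sketch of that arithmetic; the concrete sizes
(64 slots, 4-byte entries, a 480-byte Class) are illustrative assumptions, and
the length-field helper is named here only for clarity.

// Sketch of the embedded IMT/vtable layout implied by the offsets above.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kImtSize = 64;           // assumed slot count
constexpr uint32_t kClassFieldsSize = 480;  // stand-in for sizeof(Class)
constexpr uint32_t kEntrySize = 4;          // stand-in for sizeof(ImTableEntry)

constexpr uint32_t EmbeddedImTableOffset() { return kClassFieldsSize; }
constexpr uint32_t EmbeddedVTableLengthOffset() {
  return EmbeddedImTableOffset() + kImtSize * kEntrySize;
}
constexpr uint32_t EmbeddedVTableOffset() {
  return EmbeddedVTableLengthOffset() + static_cast<uint32_t>(sizeof(int32_t));
}

int main() {
  std::printf("imt @ %u, vtable length @ %u, vtable @ %u\n",
              EmbeddedImTableOffset(), EmbeddedVTableLengthOffset(),
              EmbeddedVTableOffset());
}
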
diff --git a/runtime/mirror/iftable-inl.h b/runtime/mirror/iftable-inl.h
index 3f20bf4..d1309d2 100644
--- a/runtime/mirror/iftable-inl.h
+++ b/runtime/mirror/iftable-inl.h
@@ -27,7 +27,7 @@
   DCHECK(interface->IsInterface());
   const size_t idx = i * kMax + kInterface;
   DCHECK_EQ(Get(idx), static_cast<Object*>(nullptr));
-  Set<false>(idx, interface);
+  SetWithoutChecks<false>(idx, interface);
 }
 
 }  // namespace mirror
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index 5feb602..4d899d2 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -25,13 +25,14 @@
 
 class MANAGED IfTable FINAL : public ObjectArray<Object> {
  public:
-  Class* GetInterface(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    Class* interface = Get((i * kMax) + kInterface)->AsClass();
+  ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    Class* interface = GetWithoutChecks((i * kMax) + kInterface)->AsClass();
     DCHECK(interface != NULL);
     return interface;
   }
 
-  void SetInterface(int32_t i, Class* interface) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE void SetInterface(int32_t i, Class* interface)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   ObjectArray<ArtMethod>* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     ObjectArray<ArtMethod>* method_array =
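
GetInterface() above indexes a flattened pair layout: IfTable is a single
ObjectArray in which record i occupies indices [i*kMax, i*kMax + kMax), with
the interface and its method array at fixed offsets within the record. A toy
version with hypothetical types; the usual kInterface = 0, kMethodArray = 1,
kMax = 2 values are assumed below.

// Sketch of IfTable's flattened (interface, method array) record layout.
#include <cassert>
#include <cstdio>
#include <vector>

struct Obj { const char* name; };

constexpr int kInterface = 0;
constexpr int kMethodArray = 1;
constexpr int kMax = 2;  // record stride

struct FlatIfTable {
  std::vector<Obj*> storage;  // length == Count() * kMax

  int Count() const { return static_cast<int>(storage.size()) / kMax; }
  Obj* GetInterface(int i) const { return storage[i * kMax + kInterface]; }
  Obj* GetMethodArray(int i) const { return storage[i * kMax + kMethodArray]; }
};

int main() {
  Obj runnable{"java.lang.Runnable"}, methods{"Runnable methods"};
  FlatIfTable table{{&runnable, &methods}};
  assert(table.Count() == 1);
  std::printf("%s -> %s\n", table.GetInterface(0)->name,
              table.GetMethodArray(0)->name);
}
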
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 7012b19..6404faf 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -45,11 +45,11 @@
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool CheckAssignable(T* object) NO_THREAD_SAFETY_ANALYSIS;
 
-  void Set(int32_t i, T* object) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   // TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  void Set(int32_t i, T* object) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;
+  ALWAYS_INLINE void Set(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
 
   // Set element without bound and element type checks, to be used in limited
   // circumstances, such as during boot image writing.
@@ -57,15 +57,15 @@
   // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  void SetWithoutChecks(int32_t i, T* object) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;
+  ALWAYS_INLINE void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
   // TODO fix thread safety analysis broken by the use of template. This should be
   // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  void SetWithoutChecksAndWriteBarrier(int32_t i, T* object) ALWAYS_INLINE
+  ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, T* object)
       NO_THREAD_SAFETY_ANALYSIS;
 
-  T* GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Copy src into this array (dealing with overlaps as memmove does) without assignability checks.
   void AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
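
Two mechanical changes run through this file and its callers: ALWAYS_INLINE
moves before the return type so every declaration spells it the same way, and
hot paths whose indices are already guarded by a DCHECK switch from Get/Set to
the WithoutChecks variants. A sketch of that checked/unchecked split, assuming
ALWAYS_INLINE expands to the GCC-style always_inline attribute as it does
elsewhere in ART; FakeObjectArray is a hypothetical simplification.

// Sketch of the Get vs GetWithoutChecks split.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

#if defined(__GNUC__)
#define ALWAYS_INLINE __attribute__((always_inline))
#else
#define ALWAYS_INLINE
#endif

template <typename T>
struct FakeObjectArray {
  std::vector<T*> elements;

  // Checked accessor: validates the index on every call.
  T* Get(int32_t i) const {
    if (i < 0 || i >= static_cast<int32_t>(elements.size())) {
      std::fprintf(stderr, "index %d out of bounds\n", i);  // ART would throw
      return nullptr;
    }
    return elements[i];
  }

  // Unchecked accessor: callers must have proven the index valid, typically
  // behind a DCHECK, so release builds pay no bounds check.
  ALWAYS_INLINE T* GetWithoutChecks(int32_t i) const {
    assert(i >= 0 && i < static_cast<int32_t>(elements.size()));
    return elements[i];
  }
};

int main() {
  int x = 42;
  FakeObjectArray<int> arr{{&x}};
  std::printf("%d %d\n", *arr.Get(0), *arr.GetWithoutChecks(0));
}
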
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index ac9026b..001598f 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -49,6 +49,11 @@
   return imt_conflict_method_.Read();
 }
 
+inline mirror::ArtMethod* Runtime::GetImtUnimplementedMethod() {
+  CHECK(!imt_unimplemented_method_.IsNull());
+  return imt_unimplemented_method_.Read();
+}
+
 inline mirror::ObjectArray<mirror::ArtMethod>* Runtime::GetDefaultImt()
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   CHECK(HasDefaultImt());
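
GetImtUnimplementedMethod() follows the runtime's usual GcRoot accessor shape:
CHECK that the root has been installed, then Read() it (Read() is where a read
barrier would be applied under a concurrent collector). A self-contained sketch
of that pattern; FakeGcRoot, FakeArtMethod, and FakeRuntime are stand-ins, not
the real ART types.

// Sketch of the GcRoot accessor pattern behind GetImtUnimplementedMethod().
#include <cassert>
#include <cstdio>

struct FakeArtMethod { const char* name; };

template <typename T>
class FakeGcRoot {
 public:
  bool IsNull() const { return root_ == nullptr; }
  T* Read() const { return root_; }  // a read-barrier hook would live here
  void Assign(T* ref) { root_ = ref; }

 private:
  T* root_ = nullptr;
};

struct FakeRuntime {
  FakeGcRoot<FakeArtMethod> imt_unimplemented_method_;

  FakeArtMethod* GetImtUnimplementedMethod() {
    assert(!imt_unimplemented_method_.IsNull());  // CHECK in the real code
    return imt_unimplemented_method_.Read();
  }
};

int main() {
  FakeArtMethod m{"imt-unimplemented"};
  FakeRuntime runtime;
  runtime.imt_unimplemented_method_.Assign(&m);
  std::printf("%s\n", runtime.GetImtUnimplementedMethod()->name);
}
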
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 9848382..bd8aa08f 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1156,6 +1156,9 @@
   if (HasImtConflictMethod()) {
     imt_conflict_method_.VisitRoot(callback, arg, 0, kRootVMInternal);
   }
+  if (!imt_unimplemented_method_.IsNull()) {
+    imt_unimplemented_method_.VisitRoot(callback, arg, 0, kRootVMInternal);
+  }
   if (HasDefaultImt()) {
     default_imt_.VisitRoot(callback, arg, 0, kRootVMInternal);
   }
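
Unlike the neighboring roots, the new root is guarded with IsNull() directly
rather than a Has...() helper, since the unimplemented method may not exist yet
early in startup; null roots are simply skipped. A sketch of that conditional
enumeration, with a callback signature that simplifies ART's RootCallback.

// Sketch of conditional GC-root visiting as added above.
#include <cstdio>

struct FakeMethod { const char* name; };

using RootCallback = void (*)(FakeMethod** root, void* arg);

struct FakeRuntime {
  FakeMethod* imt_conflict_method_ = nullptr;
  FakeMethod* imt_unimplemented_method_ = nullptr;

  void VisitRoots(RootCallback callback, void* arg) {
    if (imt_conflict_method_ != nullptr) {
      callback(&imt_conflict_method_, arg);
    }
    if (imt_unimplemented_method_ != nullptr) {  // may be unset at startup
      callback(&imt_unimplemented_method_, arg);
    }
  }
};

int main() {
  FakeMethod unimpl{"imt-unimplemented"};
  FakeRuntime runtime;
  runtime.imt_unimplemented_method_ = &unimpl;
  runtime.VisitRoots(
      [](FakeMethod** root, void*) { std::printf("root: %s\n", (*root)->name); },
      nullptr);
}
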
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 8cfa8aa..a34829d 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -310,6 +310,7 @@
 
   // Returns a special method that calls into a trampoline for runtime imt conflicts.
   mirror::ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::ArtMethod* GetImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool HasImtConflictMethod() const {
     return !imt_conflict_method_.IsNull();
@@ -318,6 +319,9 @@
   void SetImtConflictMethod(mirror::ArtMethod* method) {
     imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method);
   }
+  void SetImtUnimplementedMethod(mirror::ArtMethod* method) {
+    imt_unimplemented_method_ = GcRoot<mirror::ArtMethod>(method);
+  }
 
   mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -515,6 +519,9 @@
   GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
   GcRoot<mirror::ArtMethod> resolution_method_;
   GcRoot<mirror::ArtMethod> imt_conflict_method_;
+  // The unimplemented method behaves exactly like the conflict method; the class
+  // linker uses it to distinguish unfilled IMT slots from conflict slots in
+  // superclasses.
+  GcRoot<mirror::ArtMethod> imt_unimplemented_method_;
   GcRoot<mirror::ObjectArray<mirror::ArtMethod>> default_imt_;
 
   InstructionSet instruction_set_;
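
To see why the second sentinel earns its own GcRoot: as the comment above says,
both sentinels behave identically at dispatch time, but when the class linker
derives a subclass IMT from a superclass IMT it must treat "nothing mapped here
yet" and "several methods collide here" differently. A toy merge rule under
hypothetical names, not the class linker's actual algorithm.

// Sketch of merging an interface method into an inherited IMT slot.
#include <cstdio>

struct Method { const char* name; };

Method kUnimplemented{"<imt-unimplemented>"};  // slot never filled
Method kConflict{"<imt-conflict>"};            // slot claimed by >1 method

Method* MergeSlot(Method* inherited, Method* incoming) {
  if (inherited == &kUnimplemented) {
    return incoming;    // empty slot: the new method can own it
  }
  if (inherited == &kConflict || inherited != incoming) {
    return &kConflict;  // already conflicting, or a second distinct method
  }
  return inherited;     // same method again: nothing changes
}

int main() {
  Method run{"Runnable.run"}, call{"Callable.call"};
  Method* slot = &kUnimplemented;
  slot = MergeSlot(slot, &run);
  std::printf("after run:  %s\n", slot->name);   // Runnable.run
  slot = MergeSlot(slot, &call);
  std::printf("after call: %s\n", slot->name);   // <imt-conflict>
}
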
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 011bf96..24ff92e 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -625,6 +625,7 @@
     {
       MutexLock mu(self, *Locks::thread_suspend_count_lock_);
       // Update global suspend all state for attaching threads.
+      DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
       ++suspend_all_count_;
       ++debug_suspend_all_count_;
       // Increment everybody's suspend count (except our own).
@@ -716,6 +717,55 @@
   VLOG(threads) << *self << " self-reviving (debugger)";
 }
 
+void ThreadList::ResumeAllForDebugger() {
+  Thread* self = Thread::Current();
+  Thread* debug_thread = Dbg::GetDebugThread();
+  bool needs_resume = false;
+
+  VLOG(threads) << *self << " ResumeAllForDebugger starting...";
+
+  // Threads can't resume if we exclusively hold the mutator lock.
+  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
+
+  {
+    MutexLock mu(self, *Locks::thread_list_lock_);
+    {
+      MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+      // Update global suspend all state for attaching threads.
+      DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
+      needs_resume = (debug_suspend_all_count_ > 0);
+      if (needs_resume) {
+        --suspend_all_count_;
+        --debug_suspend_all_count_;
+        // Decrement everybody's suspend count (except our own).
+        for (const auto& thread : list_) {
+          if (thread == self || thread == debug_thread) {
+            continue;
+          }
+          if (thread->GetDebugSuspendCount() == 0) {
+            // This thread may have been individually resumed with ThreadReference.Resume.
+            continue;
+          }
+          VLOG(threads) << "requesting thread resume: " << *thread;
+          thread->ModifySuspendCount(self, -1, true);
+        }
+      } else {
+        // We were asked to resume all threads without a matching suspend-all
+        // request beforehand, so only log a warning.
+        LOG(WARNING) << "Debugger attempted to resume all threads without "
+                     << "having suspended them all before.";
+      }
+    }
+  }
+
+  if (needs_resume) {
+    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+    Thread::resume_cond_->Broadcast(self);
+  }
+
+  VLOG(threads) << *self << " ResumeAllForDebugger complete";
+}
+
 void ThreadList::UndoDebuggerSuspensions() {
   Thread* self = Thread::Current();
 
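ResumeAllForDebugger() is the inverse of the suspend-all path: under
thread_suspend_count_lock_ it decrements the global and per-thread debug counts
(skipping itself, the debug thread, and threads already resumed individually
via ThreadReference.Resume), then broadcasts resume_cond_ so suspended threads
re-check their counts. A reduced sketch of that count-and-broadcast handshake,
with std primitives standing in for ART's Mutex/ConditionVariable and all
per-thread bookkeeping collapsed into one count.

// Sketch of the suspend-count + broadcast handshake.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

std::mutex suspend_count_lock;
std::condition_variable resume_cond;
int suspend_count = 0;  // stand-in for a thread's debug suspend count

void SuspendForDebugger() {
  std::lock_guard<std::mutex> lock(suspend_count_lock);
  ++suspend_count;
}

void ResumeAllForDebugger() {
  {
    std::lock_guard<std::mutex> lock(suspend_count_lock);
    if (suspend_count == 0) {
      std::fprintf(stderr, "resume-all without prior suspend-all\n");
      return;
    }
    --suspend_count;
  }
  // Wake every thread parked in WaitUntilResumed(); each rechecks its count.
  resume_cond.notify_all();
}

void WaitUntilResumed() {
  std::unique_lock<std::mutex> lock(suspend_count_lock);
  resume_cond.wait(lock, [] { return suspend_count == 0; });
}

int main() {
  SuspendForDebugger();
  std::thread worker([] { WaitUntilResumed(); std::puts("worker resumed"); });
  ResumeAllForDebugger();
  worker.join();
}
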
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index bb4f775..9f515a8 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -104,6 +104,11 @@
   void SuspendSelfForDebugger()
       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);
 
+  // Resume all threads previously suspended by SuspendAllForDebugger().
+  void ResumeAllForDebugger()
+      LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);
+
   void UndoDebuggerSuspensions()
       LOCKS_EXCLUDED(Locks::thread_list_lock_,
                      Locks::thread_suspend_count_lock_);
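
The LOCKS_EXCLUDED annotation on the new declaration feeds Clang's
-Wthread-safety analysis: it promises the function acquires both locks itself,
so a caller already holding either one is flagged at compile time. A minimal
sketch of how such a macro wires up; the macro definitions below follow the
standard Clang thread-safety idiom and are assumptions, not copies of ART's
headers.

// Sketch of LOCKS_EXCLUDED; compile with: clang++ -Wthread-safety
#include <mutex>

#if defined(__clang__)
#define CAPABILITY(x) __attribute__((capability(x)))
#define LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
#else
#define CAPABILITY(x)
#define LOCKS_EXCLUDED(...)
#define ACQUIRE(...)
#define RELEASE(...)
#endif

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() { mu_.lock(); }
  void Unlock() RELEASE() { mu_.unlock(); }

 private:
  std::mutex mu_;
};

Mutex thread_list_lock;

// The analysis warns if a caller invokes this while holding thread_list_lock.
void ResumeAllForDebugger() LOCKS_EXCLUDED(thread_list_lock);

void ResumeAllForDebugger() {
  thread_list_lock.Lock();
  // ... walk the thread list and adjust suspend counts ...
  thread_list_lock.Unlock();
}

int main() { ResumeAllForDebugger(); }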