JDWP: only deoptimize when it is required

We don't need to deoptimize anything when we force the use of the
interpreter (-Xint). In this case, no compiled code is executed
(except for native methods, which are not affected by deoptimization).
Therefore we don't even need to enable/disable deoptimization support
in the instrumentation.

We also don't need to deoptimize a method that has not been compiled:
since it will run with the interpreter anyway, there is no point in
deoptimizing it. However, the method may have been inlined into a
compiled caller, in which case we still need to deoptimize everything.

This CL updates breakpoint support by storing the required kind of
deoptimization for a particular method. There are 3 cases:
- kNothing: the method does not require deoptimization.
- kSelectiveDeoptimization: only this method needs to be deoptimized.
- kFullDeoptimization: we must deoptimize everything.
When uninstalling a breakpoint, we need to do the reverse operation.
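
That reverse mapping, roughly (the actual removal code below also
resets the target method on the request and sanity-checks the
remaining breakpoints):

    // Sketch only: mirrors the breakpoint removal path in debugger.cc,
    // assuming the removed breakpoint was the last one in the method.
    switch (deoptimization_kind) {
      case DeoptimizationRequest::kFullDeoptimization:
        req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
        break;
      case DeoptimizationRequest::kSelectiveDeoptimization:
        req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
        break;
      default:  // kNothing: nothing was deoptimized, so nothing to undo.
        req->SetKind(DeoptimizationRequest::kNothing);
        break;
    }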

Also fixes the SanityCheckExistingBreakpoints function so it only
checks the breakpoints related to the given method, and adds extra
verbose logs when choosing the appropriate deoptimization kind.

Includes a partial cherry-pick of commit
87553c9fa1298ffb40127b2bb6413859fd3f79df in order to use the new
ClassLinker::GetOatMethodQuickCodeFor method.

Bug: 18407046

(cherry picked from commit f3928794a10516e2ac0ffe2686a10891788d4b9c)

Change-Id: I50853cc5fc5c52650485785a1198d35ea0f7fb8e
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 3d9bbb6..6d22deb 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2467,6 +2467,15 @@
 }
 #endif
 
+const void* ClassLinker::GetOatMethodQuickCodeFor(mirror::ArtMethod* method) {
+  if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) {
+    return nullptr;
+  }
+  OatFile::OatMethod oat_method;
+  bool found = FindOatMethodFor(method, &oat_method);
+  return found ? oat_method.GetQuickCode() : nullptr;
+}
+
 const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
                                             uint32_t method_idx) {
   OatFile::OatClass oat_class;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index a5c1c24..3e45249 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -359,6 +359,12 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 #endif
 
+  // Get compiled code for a method, return null if no code
+  // exists. This is unlike Get..OatCodeFor which will return a bridge
+  // or interpreter entrypoint.
+  const void* GetOatMethodQuickCodeFor(mirror::ArtMethod* method)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   pid_t GetClassesLockOwner();  // For SignalCatcher.
   pid_t GetDexLockOwner();  // For SignalCatcher.
 
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 1f52c46..3f6bf79 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -183,16 +183,20 @@
 
 class Breakpoint {
  public:
-  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
+  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc,
+             DeoptimizationRequest::Kind deoptimization_kind)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-    : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) {
+    : method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) {
+    CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
+          deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
+          deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
     ScopedObjectAccessUnchecked soa(Thread::Current());
     method_ = soa.EncodeMethod(method);
   }
 
   Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
     : method_(nullptr), dex_pc_(other.dex_pc_),
-      need_full_deoptimization_(other.need_full_deoptimization_) {
+      deoptimization_kind_(other.deoptimization_kind_) {
     ScopedObjectAccessUnchecked soa(Thread::Current());
     method_ = soa.EncodeMethod(other.Method());
   }
@@ -206,8 +210,8 @@
     return dex_pc_;
   }
 
-  bool NeedFullDeoptimization() const {
-    return need_full_deoptimization_;
+  DeoptimizationRequest::Kind GetDeoptimizationKind() const {
+    return deoptimization_kind_;
   }
 
  private:
@@ -216,7 +220,7 @@
   uint32_t dex_pc_;
 
   // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
-  bool need_full_deoptimization_;
+  DeoptimizationRequest::Kind deoptimization_kind_;
 };
 
 static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
@@ -731,6 +735,12 @@
   return gDisposed;
 }
 
+bool Dbg::RequiresDeoptimization() {
+  // We don't need deoptimization if everything runs with interpreter after
+  // enabling -Xint mode.
+  return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
+}
+
 void Dbg::GoActive() {
   // Enable all debugging features, including scans for breakpoints.
   // This is a no-op if we're already active.
@@ -763,7 +773,9 @@
   Thread* self = Thread::Current();
   ThreadState old_state = self->SetStateUnsafe(kRunnable);
   CHECK_NE(old_state, kRunnable);
-  runtime->GetInstrumentation()->EnableDeoptimization();
+  if (RequiresDeoptimization()) {
+    runtime->GetInstrumentation()->EnableDeoptimization();
+  }
   instrumentation_events_ = 0;
   gDebuggerActive = true;
   CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
@@ -801,7 +813,9 @@
                                                     instrumentation_events_);
       instrumentation_events_ = 0;
     }
-    runtime->GetInstrumentation()->DisableDeoptimization();
+    if (RequiresDeoptimization()) {
+      runtime->GetInstrumentation()->DisableDeoptimization();
+    }
     gDebuggerActive = false;
   }
   gRegistry->Clear();
@@ -2999,9 +3013,11 @@
 }
 
 void Dbg::DelayFullUndeoptimization() {
-  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
-  ++delayed_full_undeoptimization_count_;
-  DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
+  if (RequiresDeoptimization()) {
+    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
+    ++delayed_full_undeoptimization_count_;
+    DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
+  }
 }
 
 void Dbg::ProcessDelayedFullUndeoptimizations() {
@@ -3159,20 +3175,78 @@
 }
 
 // Sanity checks all existing breakpoints on the same method.
-static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
+static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m,
+                                           DeoptimizationRequest::Kind deoptimization_kind)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
   for (const Breakpoint& breakpoint : gBreakpoints) {
-    CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
+    if (breakpoint.Method() == m) {
+      CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
+    }
   }
-  if (need_full_deoptimization) {
+  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+  if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
     // We should have deoptimized everything but not "selectively" deoptimized this method.
-    CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized());
-    CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
-  } else {
+    CHECK(instrumentation->AreAllMethodsDeoptimized());
+    CHECK(!instrumentation->IsDeoptimized(m));
+  } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
     // We should have "selectively" deoptimized this method.
     // Note: while we have not deoptimized everything for this method, we may have done it for
     // another event.
-    CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
+    CHECK(instrumentation->IsDeoptimized(m));
+  } else {
+    // This method does not require deoptimization.
+    CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
+    CHECK(!instrumentation->IsDeoptimized(m));
+  }
+}
+
+static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
+                                                                 mirror::ArtMethod* m)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  if (!Dbg::RequiresDeoptimization()) {
+    // We already run in interpreter-only mode so we don't need to deoptimize anything.
+    VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
+               << PrettyMethod(m);
+    return DeoptimizationRequest::kNothing;
+  }
+  const Breakpoint* existing_breakpoint;
+  {
+    ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
+    existing_breakpoint = FindFirstBreakpointForMethod(m);
+  }
+  if (existing_breakpoint == nullptr) {
+    // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
+    // inlined, we deoptimize everything; otherwise we deoptimize only this method.
+    // Note: IsMethodPossiblyInlined goes into the method verifier and may cause thread suspension.
+    // Therefore we must not hold any lock when we call it.
+    bool need_full_deoptimization = IsMethodPossiblyInlined(self, m);
+    if (need_full_deoptimization) {
+      VLOG(jdwp) << "Need full deoptimization because of possible inlining of method "
+                 << PrettyMethod(m);
+      return DeoptimizationRequest::kFullDeoptimization;
+    } else {
+      // We don't need to deoptimize if the method has not been compiled.
+      ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+      const bool is_compiled = class_linker->GetOatMethodQuickCodeFor(m) != nullptr;
+      if (is_compiled) {
+        VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
+        return DeoptimizationRequest::kSelectiveDeoptimization;
+      } else {
+        // Method is not compiled: we don't need to deoptimize.
+        VLOG(jdwp) << "No need for deoptimization for non-compiled method " << PrettyMethod(m);
+        return DeoptimizationRequest::kNothing;
+      }
+    }
+  } else {
+    // There is at least one breakpoint for this method: we don't need to deoptimize.
+    // Let's check that all breakpoints are configured the same way for deoptimization.
+    VLOG(jdwp) << "Breakpoint already set: no deoptimization is required";
+    DeoptimizationRequest::Kind deoptimization_kind = existing_breakpoint->GetDeoptimizationKind();
+    if (kIsDebugBuild) {
+      ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
+      SanityCheckExistingBreakpoints(m, deoptimization_kind);
+    }
+    return DeoptimizationRequest::kNothing;
   }
 }
 
@@ -3183,40 +3257,19 @@
   mirror::ArtMethod* m = FromMethodId(location->method_id);
   DCHECK(m != nullptr) << "No method for method id " << location->method_id;
 
-  const Breakpoint* existing_breakpoint;
-  {
-    ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
-    existing_breakpoint = FindFirstBreakpointForMethod(m);
-  }
-  bool need_full_deoptimization;
-  if (existing_breakpoint == nullptr) {
-    // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
-    // inlined, we deoptimize everything; otherwise we deoptimize only this method.
-    // Note: IsMethodPossiblyInlined goes into the method verifier and may cause thread suspension.
-    // Therefore we must not hold any lock when we call it.
-    need_full_deoptimization = IsMethodPossiblyInlined(self, m);
-    if (need_full_deoptimization) {
-      req->SetKind(DeoptimizationRequest::kFullDeoptimization);
-      req->SetMethod(nullptr);
-    } else {
-      req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization);
-      req->SetMethod(m);
-    }
+  const DeoptimizationRequest::Kind deoptimization_kind = GetRequiredDeoptimizationKind(self, m);
+  req->SetKind(deoptimization_kind);
+  if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
+    req->SetMethod(m);
   } else {
-    // There is at least one breakpoint for this method: we don't need to deoptimize.
-    req->SetKind(DeoptimizationRequest::kNothing);
+    CHECK(deoptimization_kind == DeoptimizationRequest::kNothing ||
+          deoptimization_kind == DeoptimizationRequest::kFullDeoptimization);
     req->SetMethod(nullptr);
-
-    need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization();
-    if (kIsDebugBuild) {
-      ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
-      SanityCheckExistingBreakpoints(m, need_full_deoptimization);
-    }
   }
 
   {
     WriterMutexLock mu(self, *Locks::breakpoint_lock_);
-    gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization));
+    gBreakpoints.push_back(Breakpoint(m, location->dex_pc, deoptimization_kind));
     VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
                << gBreakpoints[gBreakpoints.size() - 1];
   }
@@ -3228,12 +3281,13 @@
   WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
   mirror::ArtMethod* m = FromMethodId(location->method_id);
   DCHECK(m != nullptr) << "No method for method id " << location->method_id;
-  bool need_full_deoptimization = false;
+  DeoptimizationRequest::Kind deoptimization_kind = DeoptimizationRequest::kNothing;
   for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
     if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
       VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
-      need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization();
-      DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
+      deoptimization_kind = gBreakpoints[i].GetDeoptimizationKind();
+      DCHECK_EQ(deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization,
+                Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
       gBreakpoints.erase(gBreakpoints.begin() + i);
       break;
     }
@@ -3241,21 +3295,26 @@
   const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
   if (existing_breakpoint == nullptr) {
     // There is no more breakpoint on this method: we need to undeoptimize.
-    if (need_full_deoptimization) {
+    if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
       // This method required full deoptimization: we need to undeoptimize everything.
       req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
       req->SetMethod(nullptr);
-    } else {
+    } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
       // This method required selective deoptimization: we need to undeoptimize only that method.
       req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
       req->SetMethod(m);
+    } else {
+      // This method had no need for deoptimization: do nothing.
+      CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
+      req->SetKind(DeoptimizationRequest::kNothing);
+      req->SetMethod(nullptr);
     }
   } else {
     // There is at least one breakpoint for this method: we don't need to undeoptimize.
     req->SetKind(DeoptimizationRequest::kNothing);
     req->SetMethod(nullptr);
     if (kIsDebugBuild) {
-      SanityCheckExistingBreakpoints(m, need_full_deoptimization);
+      SanityCheckExistingBreakpoints(m, deoptimization_kind);
     }
   }
 }
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 131de2c..3fdd823 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -522,6 +522,9 @@
       LOCKS_EXCLUDED(Locks::breakpoint_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Indicates whether we need deoptimization for debugging.
+  static bool RequiresDeoptimization();
+
   // Records deoptimization request in the queue.
   static void RequestDeoptimization(const DeoptimizationRequest& req)
       LOCKS_EXCLUDED(Locks::deoptimization_lock_)
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 10a017c..2cdf272 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -125,6 +125,10 @@
 };
 
 static bool NeedsFullDeoptimization(JdwpEventKind eventKind) {
+  if (!Dbg::RequiresDeoptimization()) {
+    // We don't need deoptimization for debugging.
+    return false;
+  }
   switch (eventKind) {
       case EK_METHOD_ENTRY:
       case EK_METHOD_EXIT: