Suspend the runtime when switching interpreters.

Make the interpreter switch more deterministic by suspending all threads.

This also allows us to remove some of the checks; for example, if we are
in mterp, the conditions must already be favourable.
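
For context, changes that can affect interpreter::CanUseMterp() are
funnelled through Runtime::DoAndMaybeSwitchInterpreter(), so the
suspend-all added there becomes the single synchronization point. A
minimal sketch of such a caller (the exact body in runtime.cc may
differ, and the async_exceptions_thrown_ field is assumed here):

  void Runtime::SetAsyncExceptionsThrown() {
    DoAndMaybeSwitchInterpreter([=]() { async_exceptions_thrown_ = true; });
  }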

Test: ./art/test.py -b --host --64
Change-Id: Ic5fa70592393c7e17decfb84565f3138659340fe
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index d004d64..d68bf95 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -253,13 +253,6 @@
   DCHECK(!shadow_frame.GetMethod()->IsAbstract());
   DCHECK(!shadow_frame.GetMethod()->IsNative());
 
-  // Check that we are using the right interpreter.
-  if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
-    // The flag might be currently being updated on all threads. Retry with lock.
-    MutexLock tll_mu(self, *Locks::thread_list_lock_);
-    DCHECK_EQ(self->UseMterp(), CanUseMterp());
-  }
-
   if (LIKELY(!from_deoptimize)) {  // Entering the method, but not via deoptimization.
     if (kIsDebugBuild) {
       CHECK_EQ(shadow_frame.GetDexPC(), 0u);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index a633a63..62f5d91 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -140,8 +140,11 @@
                                    uint16_t inst_data,
                                    JValue* result)
     REQUIRES_SHARED(Locks::mutator_lock_) {
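+  // The interpreter switch now suspends all threads, so the thread-local flag can not be stale.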
+  DCHECK_EQ(self->UseMterp(), CanUseMterp());
   // Make sure to check for async exceptions before anything else.
-  if (is_mterp && self->UseMterp()) {
+  if (is_mterp) {
+    DCHECK(self->UseMterp());
     DCHECK(!self->ObserveAsyncException());
   } else if (UNLIKELY(self->ObserveAsyncException())) {
     return false;
@@ -219,7 +221,7 @@
   // If the bit is not set, we explicitly recheck all the conditions.
   // If any of the conditions get falsified, it is important to clear the bit.
   bool use_fast_path = false;
-  if (is_mterp && self->UseMterp()) {
+  if (is_mterp) {
     use_fast_path = called_method->UseFastInterpreterToInterpreterInvoke();
     if (!use_fast_path) {
       use_fast_path = UseFastInterpreterToInterpreterInvoke(called_method);
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 912c444..fd1430a 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -546,12 +546,8 @@
 
 extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  // Check that we are using the right interpreter.
-  if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
-    // The flag might be currently being updated on all threads. Retry with lock.
-    MutexLock tll_mu(self, *Locks::thread_list_lock_);
-    DCHECK_EQ(self->UseMterp(), CanUseMterp());
-  }
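+  // The interpreter switch suspends all threads, so no retry under thread_list_lock_ is needed.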
+  DCHECK(self->UseMterp());
   DCHECK(!Runtime::Current()->IsActiveTransaction());
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   uint16_t inst_data = inst->Fetch16(0);
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 2ffaf98..c7731f4 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -28,6 +28,7 @@
 #include "gc_root-inl.h"
 #include "interpreter/mterp/mterp.h"
 #include "obj_ptr-inl.h"
+#include "scoped_thread_state_change-inl.h"
 #include "thread_list.h"
 
 namespace art {
@@ -90,12 +91,25 @@
 }
 
 template<typename Action>
-void Runtime::DoAndMaybeSwitchInterpreter(Action lamda) {
-  MutexLock tll_mu(Thread::Current(), *Locks::thread_list_lock_);
-  lamda();
-  Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) {
-      thread->tls32_.use_mterp.store(interpreter::CanUseMterp());
-  }, nullptr);
+void Runtime::DoAndMaybeSwitchInterpreter(Action lambda) {
+  Thread* self = Thread::Current();
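+  // We can not suspend during shutdown or while already holding the mutator lock exclusively.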
+  if (Runtime::Current()->IsShuttingDown(self) || Locks::mutator_lock_->IsExclusiveHeld(self)) {
+    MutexLock tll_mu(self, *Locks::thread_list_lock_);
+    lambda();
+    Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) {
+        thread->tls32_.use_mterp.store(interpreter::CanUseMterp());
+    }, nullptr);
+  } else {
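+    // Suspend all threads so no thread observes a stale use_mterp flag while running.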
+    ScopedThreadStateChange tsc(self, kSuspended);
+    ScopedSuspendAll ssa(__FUNCTION__);
+    MutexLock tll_mu(self, *Locks::thread_list_lock_);
+    lambda();
+    Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) {
+        thread->tls32_.use_mterp.store(interpreter::CanUseMterp());
+    }, nullptr);
+  }
 }
 
 }  // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index b76a658..bd85cf6 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -669,7 +669,7 @@
   // It ensures that two calls do not interfere with each other and
   // it makes it possible to DCHECK that thread local flag is correct.
   template<typename Action>
-  static void DoAndMaybeSwitchInterpreter(Action lamda);
+  static void DoAndMaybeSwitchInterpreter(Action lambda);
 
   // Returns the build fingerprint, if set. Otherwise an empty string is returned.
   std::string GetFingerprint() {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6c41ae4..26bfa44 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -4156,7 +4156,13 @@
 
 void Thread::SetAsyncException(ObjPtr<mirror::Throwable> new_exception) {
   CHECK(new_exception != nullptr);
-  Runtime::Current()->SetAsyncExceptionsThrown();
+  {
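+    // SetAsyncExceptionsThrown can now suspend all threads; the handle lets
+    // the GC update new_exception across that suspension point.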
+    StackHandleScope<1> hs(Thread::Current());
+    auto h_exception = hs.NewHandleWrapper(&new_exception);
+    Runtime::Current()->SetAsyncExceptionsThrown();
+  }
   if (kIsDebugBuild) {
     // Make sure we are in a checkpoint.
     MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);