Cache the value of MterpShouldSwitchInterpreters()

Add a field to the Thread object which stores the value instead (negated).
Explicitly update the field when the relevant state changes (which is rare).

This speeds up golem interpreter benchmarks by 3.5% on average,
with some benchmarks up to 15% faster.
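
Conceptually, the update path works like the stand-alone sketch below
(a simplified model, not the actual ART code: Thread, thread_list_lock,
all_threads and debugger_active are stand-ins for the runtime types, the
thread list and Locks::thread_list_lock_):

  #include <atomic>
  #include <mutex>
  #include <vector>

  struct Thread {
    // Cached "can use mterp" flag, read on the interpreter fast path.
    std::atomic<bool> use_mterp{true};
  };

  std::mutex thread_list_lock;       // stand-in for Locks::thread_list_lock_
  std::vector<Thread*> all_threads;  // stand-in for the runtime thread list
  bool debugger_active = false;      // one of the rarely-changing inputs

  // Recomputed only when some relevant state changes (rare).
  bool CanUseMterp() { return !debugger_active; }

  // Mutate the state and refresh every cached per-thread copy under the
  // thread list lock, so the fast path only needs a plain field load.
  template <typename Action>
  void DoAndMaybeSwitchInterpreter(Action action) {
    std::lock_guard<std::mutex> lock(thread_list_lock);
    action();  // e.g. [] { debugger_active = true; }
    for (Thread* t : all_threads) {
      t->use_mterp.store(CanUseMterp());
    }
  }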

Test: test.py -b -r --interpreter --host
Change-Id: If2df0d3bf9e69ab50c30102b2648e997927c34d8
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 85e4326..0d279ed 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -76,7 +76,7 @@
   "  f0:	f1bc 0f00 	cmp.w	ip, #0\n",
   "  f4:	bf18      	it	ne\n",
   "  f6:	f20d 4c01 	addwne	ip, sp, #1025	; 0x401\n",
-  "  fa:	f8d9 c08c 	ldr.w	ip, [r9, #140]	; 0x8c\n",
+  "  fa:	f8d9 c094 	ldr.w	ip, [r9, #148]	; 0x94\n",
   "  fe:	f1bc 0f00 	cmp.w	ip, #0\n",
   " 102:	d171      	bne.n	1e8 <VixlJniHelpers+0x1e8>\n",
   " 104:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
@@ -153,7 +153,7 @@
   " 21c:	f8d9 8034 	ldr.w	r8, [r9, #52]	; 0x34\n",
   " 220:	4770      	bx	lr\n",
   " 222:	4660      	mov	r0, ip\n",
-  " 224:	f8d9 c2d4 	ldr.w	ip, [r9, #724]	; 0x2d4\n",
+  " 224:	f8d9 c2dc 	ldr.w	ip, [r9, #732]	; 0x2dc\n",
   " 228:	47e0      	blx	ip\n",
   nullptr
 };
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index ca66556..8e06fe3 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -25,6 +25,7 @@
 #include "events.h"
 #include "jni/jni_internal.h"
 #include "nativehelper/scoped_local_ref.h"
+#include "runtime-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack.h"
 #include "ti_breakpoint.h"
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b679cbe..9b5b84a 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -65,6 +65,7 @@
 #include "oat_file.h"
 #include "obj_ptr-inl.h"
 #include "reflection.h"
+#include "runtime-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack.h"
 #include "thread_list.h"
@@ -688,7 +689,7 @@
     runtime->GetInstrumentation()->EnableDeoptimization();
   }
   instrumentation_events_ = 0;
-  gDebuggerActive = true;
+  Runtime::DoAndMaybeSwitchInterpreter([=](){ gDebuggerActive = true; });
   Runtime::Current()->GetRuntimeCallbacks()->AddMethodInspectionCallback(&gDebugActiveCallback);
   LOG(INFO) << "Debugger is active";
 }
@@ -726,7 +727,7 @@
       if (RequiresDeoptimization()) {
         runtime->GetInstrumentation()->DisableDeoptimization(kDbgInstrumentationKey);
       }
-      gDebuggerActive = false;
+      Runtime::DoAndMaybeSwitchInterpreter([=](){ gDebuggerActive = false; });
       Runtime::Current()->GetRuntimeCallbacks()->RemoveMethodInspectionCallback(
           &gDebugActiveCallback);
     }
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4937132..5c7b0ae 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -43,6 +43,7 @@
 #include "mirror/object_array-inl.h"
 #include "nth_caller_visitor.h"
 #include "oat_quick_method_header.h"
+#include "runtime-inl.h"
 #include "thread.h"
 #include "thread_list.h"
 
@@ -536,7 +537,7 @@
   } else {
     list.push_back(listener);
   }
-  *has_listener = true;
+  Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = true; });
 }
 
 void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t events) {
@@ -614,11 +615,11 @@
   // Check if the list contains any non-null listener, and update 'has_listener'.
   for (InstrumentationListener* l : list) {
     if (l != nullptr) {
-      *has_listener = true;
+      Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = true; });
       return;
     }
   }
-  *has_listener = false;
+  Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = false; });
 }
 
 void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t events) {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 2ae95dc..8a31985 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -321,7 +321,7 @@
       } else {
         while (true) {
           // Mterp does not support all instrumentation/debugging.
-          if (MterpShouldSwitchInterpreters() != 0) {
+          if (!self->UseMterp()) {
             return ExecuteSwitchImpl<false, false>(self, accessor, shadow_frame, result_register,
                                                    false);
           }
diff --git a/runtime/interpreter/mterp/arm/invoke.S b/runtime/interpreter/mterp/arm/invoke.S
index 8693d3b..08fd1bb 100644
--- a/runtime/interpreter/mterp/arm/invoke.S
+++ b/runtime/interpreter/mterp/arm/invoke.S
@@ -14,9 +14,9 @@
     cmp     r0, #0
     beq     MterpException
     FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
+    ldr     r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
     cmp     r0, #0
-    bne     MterpFallback
+    beq     MterpFallback
     GET_INST_OPCODE ip
     GOTO_OPCODE ip
 
@@ -37,9 +37,9 @@
     cmp     r0, #0
     beq     MterpException
     FETCH_ADVANCE_INST 4
-    bl      MterpShouldSwitchInterpreters
+    ldr     r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
     cmp     r0, #0
-    bne     MterpFallback
+    beq     MterpFallback
     GET_INST_OPCODE ip
     GOTO_OPCODE ip
 
diff --git a/runtime/interpreter/mterp/arm/main.S b/runtime/interpreter/mterp/arm/main.S
index f5fdf14..a9cffe7 100644
--- a/runtime/interpreter/mterp/arm/main.S
+++ b/runtime/interpreter/mterp/arm/main.S
@@ -531,9 +531,9 @@
     ldr     rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
     add     rPC, r0, r1, lsl #1                     @ generate new dex_pc_ptr
     /* Do we need to switch interpreters? */
-    bl      MterpShouldSwitchInterpreters
+    ldr     r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
     cmp     r0, #0
-    bne     MterpFallback
+    beq     MterpFallback
     /* resume execution at catch block */
     EXPORT_PC
     FETCH_INST
diff --git a/runtime/interpreter/mterp/arm64/invoke.S b/runtime/interpreter/mterp/arm64/invoke.S
index 03ac316..4844213 100644
--- a/runtime/interpreter/mterp/arm64/invoke.S
+++ b/runtime/interpreter/mterp/arm64/invoke.S
@@ -13,8 +13,8 @@
     bl      $helper
     cbz     w0, MterpException
     FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
+    ldr     w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+    cbz     w0, MterpFallback
     GET_INST_OPCODE ip
     GOTO_OPCODE ip
 
@@ -34,8 +34,8 @@
     bl      $helper
     cbz     w0, MterpException
     FETCH_ADVANCE_INST 4
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
+    ldr     w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+    cbz     w0, MterpFallback
     GET_INST_OPCODE ip
     GOTO_OPCODE ip
 
diff --git a/runtime/interpreter/mterp/arm64/main.S b/runtime/interpreter/mterp/arm64/main.S
index 1b72e79..858cb38 100644
--- a/runtime/interpreter/mterp/arm64/main.S
+++ b/runtime/interpreter/mterp/arm64/main.S
@@ -553,8 +553,8 @@
     ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
     add     xPC, x0, x1, lsl #1                     // generate new dex_pc_ptr
     /* Do we need to switch interpreters? */
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
+    ldr     w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+    cbz     w0, MterpFallback
     /* resume execution at catch block */
     EXPORT_PC
     FETCH_INST
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index be985ff..c9a8adc 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -142,27 +142,19 @@
   return entries[index];
 }
 
-extern "C" size_t MterpShouldSwitchInterpreters()
+bool CanUseMterp()
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const Runtime* const runtime = Runtime::Current();
-  const instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
-  return instrumentation->NonJitProfilingActive() ||
-      Dbg::IsDebuggerActive() ||
+  return
+      !Dbg::IsDebuggerActive() &&
+      !runtime->GetInstrumentation()->NonJitProfilingActive() &&
       // mterp only knows how to deal with the normal exits. It cannot handle any of the
       // non-standard force-returns.
-      // TODO We really only need to switch interpreters if a PopFrame has actually happened. We
-      // should check this here.
-      UNLIKELY(runtime->AreNonStandardExitsEnabled()) ||
+      !runtime->AreNonStandardExitsEnabled() &&
       // An async exception has been thrown. We need to go to the switch interpreter. MTerp doesn't
       // know how to deal with these so we could end up never dealing with it if we are in an
-      // infinite loop. Since this can be called in a tight loop and getting the current thread
-      // requires a TLS read we instead first check a short-circuit runtime flag that will only be
-      // set if something tries to set an async exception. This will make this function faster in
-      // the common case where no async exception has ever been sent. We don't need to worry about
-      // synchronization on the runtime flag since it is only set in a checkpoint which will either
-      // take place on the current thread or act as a synchronization point.
-      (UNLIKELY(runtime->AreAsyncExceptionsThrown()) &&
-       Thread::Current()->IsAsyncExceptionPending());
+      // infinite loop.
+      !runtime->AreAsyncExceptionsThrown();
 }
 
 
@@ -562,6 +554,12 @@
 
 extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  // Check that we are using the right interpreter.
+  if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
+    // The flag may be in the middle of being updated on all threads. Retry with the lock held.
+    MutexLock tll_mu(self, *Locks::thread_list_lock_);
+    DCHECK_EQ(self->UseMterp(), CanUseMterp());
+  }
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   uint16_t inst_data = inst->Fetch16(0);
   if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
@@ -661,7 +659,7 @@
 extern "C" size_t MterpSuspendCheck(Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   self->AllowThreadSuspension();
-  return MterpShouldSwitchInterpreters();
+  return !self->UseMterp();
 }
 
 // Execute single field access instruction (get/put, static/instance).
diff --git a/runtime/interpreter/mterp/mterp.h b/runtime/interpreter/mterp/mterp.h
index 81a53c8..af52758 100644
--- a/runtime/interpreter/mterp/mterp.h
+++ b/runtime/interpreter/mterp/mterp.h
@@ -34,12 +34,7 @@
 
 void InitMterpTls(Thread* self);
 void CheckMterpAsmConstants();
-
-// The return type should be 'bool' but our assembly stubs expect 'bool'
-// to be zero-extended to the whole register and that's broken on x86-64
-// as a 'bool' is returned in 'al' and the rest of 'rax' is garbage.
-// TODO: Fix mterp and stubs and revert this workaround. http://b/30232671
-extern "C" size_t MterpShouldSwitchInterpreters();
+bool CanUseMterp();
 
 // Poison value for TestExportPC.  If we segfault with this value, it means that a mterp
 // handler for a recent opcode failed to export the Dalvik PC prior to a possible exit from
diff --git a/runtime/interpreter/mterp/x86/invoke.S b/runtime/interpreter/mterp/x86/invoke.S
index 587c4cf..cfb9c7c 100644
--- a/runtime/interpreter/mterp/x86/invoke.S
+++ b/runtime/interpreter/mterp/x86/invoke.S
@@ -17,9 +17,10 @@
     testb   %al, %al
     jz      MterpException
     ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
+    movl    rSELF, %eax
+    movb    THREAD_USE_MTERP_OFFSET(%eax), %al
     testb   %al, %al
-    jnz     MterpFallback
+    jz      MterpFallback
     RESTORE_IBASE
     FETCH_INST
     GOTO_NEXT
@@ -43,9 +44,10 @@
     testb   %al, %al
     jz      MterpException
     ADVANCE_PC 4
-    call    SYMBOL(MterpShouldSwitchInterpreters)
+    movl    rSELF, %eax
+    movb    THREAD_USE_MTERP_OFFSET(%eax), %al
     testb   %al, %al
-    jnz     MterpFallback
+    jz      MterpFallback
     RESTORE_IBASE
     FETCH_INST
     GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86/main.S b/runtime/interpreter/mterp/x86/main.S
index 04b653e..b233f2c 100644
--- a/runtime/interpreter/mterp/x86/main.S
+++ b/runtime/interpreter/mterp/x86/main.S
@@ -560,9 +560,10 @@
     lea     (%eax, %ecx, 2), rPC
     movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
     /* Do we need to switch interpreters? */
-    call    SYMBOL(MterpShouldSwitchInterpreters)
+    movl    rSELF, %eax
+    movb    THREAD_USE_MTERP_OFFSET(%eax), %al
     testb   %al, %al
-    jnz     MterpFallback
+    jz      MterpFallback
     /* resume execution at catch block */
     REFRESH_IBASE
     FETCH_INST
diff --git a/runtime/interpreter/mterp/x86_64/invoke.S b/runtime/interpreter/mterp/x86_64/invoke.S
index 63c233c..f727915 100644
--- a/runtime/interpreter/mterp/x86_64/invoke.S
+++ b/runtime/interpreter/mterp/x86_64/invoke.S
@@ -15,9 +15,10 @@
     testb   %al, %al
     jz      MterpException
     ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
+    movq    rSELF, %rax
+    movb    THREAD_USE_MTERP_OFFSET(%rax), %al
     testb   %al, %al
-    jnz     MterpFallback
+    jz      MterpFallback
     FETCH_INST
     GOTO_NEXT
 
@@ -38,9 +39,10 @@
     testb   %al, %al
     jz      MterpException
     ADVANCE_PC 4
-    call    SYMBOL(MterpShouldSwitchInterpreters)
+    movq    rSELF, %rax
+    movb    THREAD_USE_MTERP_OFFSET(%rax), %al
     testb   %al, %al
-    jnz     MterpFallback
+    jz      MterpFallback
     FETCH_INST
     GOTO_NEXT
 
diff --git a/runtime/interpreter/mterp/x86_64/main.S b/runtime/interpreter/mterp/x86_64/main.S
index e283bbe..75eb00c 100644
--- a/runtime/interpreter/mterp/x86_64/main.S
+++ b/runtime/interpreter/mterp/x86_64/main.S
@@ -526,9 +526,10 @@
     leaq    (%rax, %rcx, 2), rPC
     movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
     /* Do we need to switch interpreters? */
-    call    SYMBOL(MterpShouldSwitchInterpreters)
+    movq    rSELF, %rax
+    movb    THREAD_USE_MTERP_OFFSET(%rax), %al
     testb   %al, %al
-    jnz     MterpFallback
+    jz      MterpFallback
     /* resume execution at catch block */
     REFRESH_IBASE
     FETCH_INST
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index bde0d11..e6cc471 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -25,7 +25,9 @@
 #include "base/casts.h"
 #include "entrypoints/quick/callee_save_frame.h"
 #include "gc_root-inl.h"
+#include "interpreter/mterp/mterp.h"
 #include "obj_ptr-inl.h"
+#include "thread_list.h"
 
 namespace art {
 
@@ -86,6 +88,15 @@
   return reinterpret_cast64<ArtMethod*>(callee_save_methods_[static_cast<size_t>(type)]);
 }
 
+template<typename Action>
+void Runtime::DoAndMaybeSwitchInterpreter(Action lambda) {
+  MutexLock tll_mu(Thread::Current(), *Locks::thread_list_lock_);
+  lambda();
+  Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) {
+      thread->tls32_.use_mterp.store(interpreter::CanUseMterp());
+  }, nullptr);
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_RUNTIME_INL_H_
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 398a48d..e27c87d 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -659,7 +659,7 @@
   }
 
   void SetNonStandardExitsEnabled() {
-    non_standard_exits_enabled_ = true;
+    DoAndMaybeSwitchInterpreter([=](){ non_standard_exits_enabled_ = true; });
   }
 
   bool AreAsyncExceptionsThrown() const {
@@ -667,9 +667,20 @@
   }
 
   void SetAsyncExceptionsThrown() {
-    async_exceptions_thrown_ = true;
+    DoAndMaybeSwitchInterpreter([=](){ async_exceptions_thrown_ = true; });
   }
 
+  // Change state and re-check which interpreter should be used.
+  //
+  // This must be called whenever there is an event that forces
+  // us to use a different interpreter (e.g. a debugger is attached).
+  //
+  // Changing the state through the lambda gives us some multithreading safety.
+  // It ensures that two calls do not interfere with each other and
+  // it makes it possible to DCHECK that the thread-local flag is correct.
+  template<typename Action>
+  static void DoAndMaybeSwitchInterpreter(Action lambda);
+
   // Returns the build fingerprint, if set. Otherwise an empty string is returned.
   std::string GetFingerprint() {
     return fingerprint_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b3492e1..2e04e0c 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -72,6 +72,7 @@
 #include "handle_scope-inl.h"
 #include "indirect_reference_table-inl.h"
 #include "interpreter/interpreter.h"
+#include "interpreter/mterp/mterp.h"
 #include "interpreter/shadow_frame-inl.h"
 #include "java_frame_root_info.h"
 #include "jni/java_vm_ext.h"
@@ -93,6 +94,7 @@
 #include "quick_exception_handler.h"
 #include "read_barrier-inl.h"
 #include "reflection.h"
+#include "runtime-inl.h"
 #include "runtime.h"
 #include "runtime_callbacks.h"
 #include "scoped_thread_state_change-inl.h"
@@ -2141,6 +2143,11 @@
   tlsPtr_.flip_function = nullptr;
   tlsPtr_.thread_local_mark_stack = nullptr;
   tls32_.is_transitioning_to_runnable = false;
+  tls32_.use_mterp = false;
+}
+
+void Thread::NotifyInThreadList() {
+  tls32_.use_mterp = interpreter::CanUseMterp();
 }
 
 bool Thread::CanLoadClasses() const {
diff --git a/runtime/thread.h b/runtime/thread.h
index d7dc5ae..941867c 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -19,6 +19,7 @@
 
 #include <setjmp.h>
 
+#include <atomic>
 #include <bitset>
 #include <deque>
 #include <iosfwd>
@@ -672,6 +673,13 @@
   }
 
   template<PointerSize pointer_size>
+  static constexpr ThreadOffset<pointer_size> UseMterpOffset() {
+    return ThreadOffset<pointer_size>(
+        OFFSETOF_MEMBER(Thread, tls32_) +
+        OFFSETOF_MEMBER(tls_32bit_sized_values, use_mterp));
+  }
+
+  template<PointerSize pointer_size>
   static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
     return ThreadOffset<pointer_size>(
         OFFSETOF_MEMBER(Thread, tls32_) +
@@ -1113,6 +1121,10 @@
     tls32_.state_and_flags.as_atomic_int.fetch_and(-1 ^ flag, std::memory_order_seq_cst);
   }
 
+  bool UseMterp() const {
+    return tls32_.use_mterp.load();
+  }
+
   void ResetQuickAllocEntryPointsForThread(bool is_marking);
 
   // Returns the remaining space in the TLAB.
@@ -1283,6 +1295,9 @@
   ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
   void Destroy();
 
+  void NotifyInThreadList()
+      REQUIRES_SHARED(Locks::thread_list_lock_);
+
   // Attaches the calling native thread to the runtime, returning the new native peer.
   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
   template <typename PeerAction>
@@ -1547,6 +1562,10 @@
     // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
     // told that AssertHeld should be good enough.
     int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
+
+    // True if everything is in the ideal state for fast interpretation.
+    // False if we need to switch to the C++ interpreter to handle special cases.
+    std::atomic<bool32_t> use_mterp;
   } tls32_;
 
   struct PACKED(8) tls_64bit_sized_values {
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ec40716..f8c90b1 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1431,6 +1431,7 @@
     }
     self->SetWeakRefAccessEnabled(cc->IsWeakRefAccessEnabled());
   }
+  self->NotifyInThreadList();
 }
 
 void ThreadList::Unregister(Thread* self) {
diff --git a/tools/cpp-define-generator/thread.def b/tools/cpp-define-generator/thread.def
index 7b19076..8c91dc8 100644
--- a/tools/cpp-define-generator/thread.def
+++ b/tools/cpp-define-generator/thread.def
@@ -56,5 +56,7 @@
            art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest)
 ASM_DEFINE(THREAD_SUSPEND_REQUEST,
            art::kSuspendRequest)
+ASM_DEFINE(THREAD_USE_MTERP_OFFSET,
+           art::Thread::UseMterpOffset<art::kRuntimePointerSize>().Int32Value())
 ASM_DEFINE(THREAD_TOP_QUICK_FRAME_OFFSET,
            art::Thread::TopOfManagedStackOffset<art::kRuntimePointerSize>().Int32Value())