Move back to jit code on trace removal

This changes the lock hierarchy so the lock checker can correctly
determine that we will not deadlock. This lets us restore the
JIT-compiled code as a method's entrypoint when tracing is removed.
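
For illustration, here is a minimal sketch of the level-ordering rule the
debug lock checker enforces. The real logic lives in art::BaseMutex; the
reduced enum, FakeThread and CheckSafeToAcquire below are stand-ins, not
ART code. In essence, a mutex may only be acquired while every mutex
already held sits at a strictly higher level, so a near-bottom lock such
as the JIT code cache lock can be taken late in any acquisition chain,
but almost nothing may be taken after it.

  // Minimal sketch of the debug lock-level check; names are illustrative only.
  #include <array>
  #include <cassert>
  #include <cstddef>

  enum LockLevel : std::size_t {  // Lower value == lower in the hierarchy == acquired later.
    kGenericBottomLock,           // e.g. deoptimized_methods_lock_.
    kJitCodeCacheLock,            // JitCodeCache::lock_, now near the bottom.
    kDefaultMutexLevel,
    kMutatorLock,
    kLockLevelCount
  };

  struct FakeThread {
    std::array<bool, kLockLevelCount> held{};  // Which levels this thread currently holds.
  };

  // Acquiring a mutex at `level` is only safe if no mutex at the same or a lower
  // level is already held, i.e. locks must be taken in decreasing level order.
  void CheckSafeToAcquire(const FakeThread& self, LockLevel level) {
    for (std::size_t i = 0; i <= level; ++i) {
      assert(!self.held[i] && "lock level violation: potential deadlock");
    }
  }

  int main() {
    FakeThread t;
    CheckSafeToAcquire(t, kMutatorLock);
    t.held[kMutatorLock] = true;                // High-level locks are taken first...
    CheckSafeToAcquire(t, kJitCodeCacheLock);
    t.held[kJitCodeCacheLock] = true;           // ...so the code cache lock can be taken late,
    CheckSafeToAcquire(t, kGenericBottomLock);  // and a bottom-level lock last of all.
    return 0;
  }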

Test: ./test.py --host

Change-Id: I14dd4eb9814c73fa3639239bb56d91c8303cec60
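
For context on the jit_code_cache.cc hunks below, a hedged sketch of the
annotalysis shape this relies on, written with Clang's thread-safety
attributes directly (compile with clang -Wthread-safety). FakeLock,
FakeCodeCache and intern_table_lock are simplified stand-ins, not ART
code: the debug root check must run before lock_ is acquired because it
may need the intern table lock, which now sits above lock_ in the
hierarchy, while FillRootTable only touches data guarded by lock_ and is
therefore annotated REQUIRES(lock_).

  #define CAPABILITY(x)        __attribute__((capability(x)))
  #define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
  #define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))
  #define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
  #define ACQUIRED_AFTER(...)  __attribute__((acquired_after(__VA_ARGS__)))

  #include <mutex>

  class CAPABILITY("mutex") FakeLock {
   public:
    void Lock() ACQUIRE() { mu_.lock(); }
    void Unlock() RELEASE() { mu_.unlock(); }
   private:
    std::mutex mu_;
  };

  FakeLock intern_table_lock;  // Stands in for Locks::intern_table_lock_ (a higher-level lock).

  class FakeCodeCache {
   public:
    // Debug-only root check: may look up interned strings, so it must run before lock_ is taken.
    void DCheckRootsAreValid() REQUIRES(!lock_) {
      intern_table_lock.Lock();
      // ... look up each string root in the intern table ...
      intern_table_lock.Unlock();
    }

    // Writes the root table; only touches data guarded by lock_.
    void FillRootTable() REQUIRES(lock_) {}

    void Commit() REQUIRES(!lock_) {
      DCheckRootsAreValid();  // Before grabbing lock_.
      lock_.Lock();
      FillRootTable();        // OK: the analysis sees lock_ held here.
      lock_.Unlock();
    }

   private:
    // Declared to be acquired after everything else, like BOTTOM_MUTEX_ACQUIRED_AFTER.
    FakeLock lock_ ACQUIRED_AFTER(intern_table_lock);
  };
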
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index ee47e7c..ced0cb1 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -65,10 +65,14 @@
   kAbortLock,
   kNativeDebugInterfaceLock,
   kSignalHandlingLock,
+  // A generic lock level for mutexes that should not allow any additional mutexes to be acquired
+  // once they are held.
+  kGenericBottomLock,
   kJdwpAdbStateLock,
   kJdwpSocketLock,
   kRegionSpaceRegionLock,
   kMarkSweepMarkStackLock,
+  kJitCodeCacheLock,
   kRosAllocGlobalLock,
   kRosAllocBracketLock,
   kRosAllocBulkFreeLock,
@@ -94,7 +98,6 @@
   kOatFileManagerLock,
   kTracingUniqueMethodsLock,
   kTracingStreamingLock,
-  kDeoptimizedMethodsLock,
   kClassLoaderClassesLock,
   kDefaultMutexLevel,
   kDexLock,
@@ -105,7 +108,6 @@
   kMonitorPoolLock,
   kClassLinkerClassesLock,  // TODO rename.
   kDexToDexCompilerLock,
-  kJitCodeCacheLock,
   kCHALock,
   kSubtypeCheckLock,
   kBreakpointLock,
@@ -736,6 +738,12 @@
   // Guard accesses to the JNI function table override.
   static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);
 
+  // When declaring any Mutex, add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check that the
+  // code doesn't try to acquire a higher-level Mutex while holding it. NB: due to the way
+  // annotalysis works, this only encodes that the mutex is below jni_function_table_lock_, even
+  // though the kGenericBottomLock level is lower than that.
+  #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::jni_function_table_lock_)
+
   // Have an exclusive aborting thread.
   static Mutex* abort_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
 
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index d25893f..4196e19 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -162,7 +162,7 @@
       have_branch_listeners_(false),
       have_invoke_virtual_or_interface_listeners_(false),
       have_exception_handled_listeners_(false),
-      deoptimized_methods_lock_("deoptimized methods lock", kDeoptimizedMethodsLock),
+      deoptimized_methods_lock_("deoptimized methods lock", kGenericBottomLock),
       deoptimization_enabled_(false),
       interpreter_handler_table_(kMainHandlerTable),
       quick_alloc_entry_points_instrumentation_counter_(0),
@@ -225,14 +225,7 @@
     if ((forced_interpret_only_ || IsDeoptimized(method)) && !method->IsNative()) {
       new_quick_code = GetQuickToInterpreterBridge();
     } else if (is_class_initialized || !method->IsStatic() || method->IsConstructor()) {
-      // It would be great to search the JIT for its implementation here but we cannot due to the
-      // locks we hold. Instead just set to the interpreter bridge and that code will search the JIT
-      // when it gets called and replace the entrypoint then.
-      if (NeedDebugVersionFor(method)) {
-        new_quick_code = GetQuickToInterpreterBridge();
-      } else {
-        new_quick_code = class_linker->GetQuickOatCodeFor(method);
-      }
+      new_quick_code = GetCodeForInvoke(method);
     } else {
       new_quick_code = GetQuickResolutionStub();
     }
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 0e366d3..e5d8800 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -720,7 +720,7 @@
 
   // The set of methods being deoptimized (by the debugger) which must be executed with interpreter
   // only.
-  mutable ReaderWriterMutex deoptimized_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  mutable ReaderWriterMutex deoptimized_methods_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
   std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(deoptimized_methods_lock_);
   bool deoptimization_enabled_;
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 52262f4..86582ef 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -507,21 +507,31 @@
   return stack_map_data - ComputeRootTableSize(GetNumberOfRoots(stack_map_data));
 }
 
-static void FillRootTable(uint8_t* roots_data, Handle<mirror::ObjectArray<mirror::Object>> roots)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
+static void DCheckRootsAreValid(Handle<mirror::ObjectArray<mirror::Object>> roots)
+    REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (!kIsDebugBuild) {
+    return;
+  }
+  const uint32_t length = roots->GetLength();
+  // Check that each root is valid.
+  for (uint32_t i = 0; i < length; ++i) {
+    ObjPtr<mirror::Object> object = roots->Get(i);
+    // Ensure the string is strongly interned. b/32995596
+    if (object->IsString()) {
+      ObjPtr<mirror::String> str = ObjPtr<mirror::String>::DownCast(object);
+      ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+      CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
+    }
+  }
+}
+
+void JitCodeCache::FillRootTable(uint8_t* roots_data,
+                                 Handle<mirror::ObjectArray<mirror::Object>> roots) {
   GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
   const uint32_t length = roots->GetLength();
   // Put all roots in `roots_data`.
   for (uint32_t i = 0; i < length; ++i) {
     ObjPtr<mirror::Object> object = roots->Get(i);
-    if (kIsDebugBuild) {
-      // Ensure the string is strongly interned. b/32995596
-      if (object->IsString()) {
-        ObjPtr<mirror::String> str = ObjPtr<mirror::String>::DownCast(object);
-        ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-        CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
-      }
-    }
     gc_roots[i] = GcRoot<mirror::Object>(object);
   }
 }
@@ -853,6 +863,12 @@
           single_impl, method, method_header);
     }
 
+    if (!method->IsNative()) {
+      // We need to do this before acquiring lock_ because the check may need to look up strings
+      // in the InternTable, which we cannot do while holding lock_. Native methods have no roots.
+      DCheckRootsAreValid(roots);
+    }
+
     // The following needs to be guarded by cha_lock_ also. Otherwise it's
     // possible that the compiled code is considered invalidated by some class linking,
     // but below we still make the compiled code valid for the method.
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 19de978..ee6111a 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -315,6 +315,11 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Adds the given roots to roots_data. Only a member function for the sake of annotalysis.
+  void FillRootTable(uint8_t* roots_data, Handle<mirror::ObjectArray<mirror::Object>> roots)
+      REQUIRES(lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                           ArtMethod* method,
                                           const std::vector<uint32_t>& entries)
@@ -391,7 +396,7 @@
   class JniStubData;
 
   // Lock for guarding allocations, collections, and the method_code_map_.
-  Mutex lock_;
+  Mutex lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
   // Condition to wait on during collection.
   ConditionVariable lock_cond_ GUARDED_BY(lock_);
   // Whether there is a code cache collection in progress.