Merge "Add dex item for hiddenapi flags"
diff --git a/adbconnection/adbconnection.cc b/adbconnection/adbconnection.cc
index 2050133..ba25393 100644
--- a/adbconnection/adbconnection.cc
+++ b/adbconnection/adbconnection.cc
@@ -20,6 +20,7 @@
 
 #include "android-base/endian.h"
 #include "android-base/stringprintf.h"
+#include "base/file_utils.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/mutex.h"
@@ -428,11 +429,11 @@
   cmsg->cmsg_type  = SCM_RIGHTS;
 
   // Duplicate the fds before sending them.
-  android::base::unique_fd read_fd(dup(adb_connection_socket_));
+  android::base::unique_fd read_fd(art::DupCloexec(adb_connection_socket_));
   CHECK_NE(read_fd.get(), -1) << "Failed to dup read_fd_: " << strerror(errno);
-  android::base::unique_fd write_fd(dup(adb_connection_socket_));
+  android::base::unique_fd write_fd(art::DupCloexec(adb_connection_socket_));
   CHECK_NE(write_fd.get(), -1) << "Failed to dup write_fd: " << strerror(errno);
-  android::base::unique_fd write_lock_fd(dup(adb_write_event_fd_));
+  android::base::unique_fd write_lock_fd(art::DupCloexec(adb_write_event_fd_));
   CHECK_NE(write_lock_fd.get(), -1) << "Failed to dup write_lock_fd: " << strerror(errno);
 
   dt_fd_forward::FdSet {
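
For reference: art::DupCloexec() comes from the newly included base/file_utils.h and duplicates a descriptor with close-on-exec set atomically, so the copy cannot leak into a child across fork()/exec(). A minimal sketch of the pattern, matching the standalone copy added in dt_fd_forward.cc later in this patch:

    #include <fcntl.h>
    #include <unistd.h>

    // Duplicate fd with FD_CLOEXEC already set; a plain dup() would
    // leave a window where a forked child inherits the descriptor.
    static int DupCloexec(int fd) {
    #if defined(__linux__)
      return fcntl(fd, F_DUPFD_CLOEXEC, 0);
    #else
      return dup(fd);
    #endif
    }
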
diff --git a/build/Android.bp b/build/Android.bp
index 47a540d..09d3a18 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -18,6 +18,8 @@
 }
 
 art_clang_tidy_errors = [
+    "android-cloexec-dup",
+    "android-cloexec-open",
     "bugprone-argument-comment",
     "bugprone-lambda-function-name",
     "bugprone-unused-raii",  // Protect scoped things like MutexLock.
@@ -35,7 +37,9 @@
     "misc-unused-using-decls",
 ]
 // Should be: strings.Join(art_clang_tidy_errors, ",").
-art_clang_tidy_errors_str = "bugprone-argument-comment"
+art_clang_tidy_errors_str = "android-cloexec-dup"
+        + ",android-cloexec-open"
+        + ",bugprone-argument-comment"
         + ",bugprone-lambda-function-name"
         + ",bugprone-unused-raii"
         + ",bugprone-unused-return-value"
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 85e4326..0d279ed 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -76,7 +76,7 @@
   "  f0:	f1bc 0f00 	cmp.w	ip, #0\n",
   "  f4:	bf18      	it	ne\n",
   "  f6:	f20d 4c01 	addwne	ip, sp, #1025	; 0x401\n",
-  "  fa:	f8d9 c08c 	ldr.w	ip, [r9, #140]	; 0x8c\n",
+  "  fa:	f8d9 c094 	ldr.w	ip, [r9, #148]	; 0x94\n",
   "  fe:	f1bc 0f00 	cmp.w	ip, #0\n",
   " 102:	d171      	bne.n	1e8 <VixlJniHelpers+0x1e8>\n",
   " 104:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
@@ -153,7 +153,7 @@
   " 21c:	f8d9 8034 	ldr.w	r8, [r9, #52]	; 0x34\n",
   " 220:	4770      	bx	lr\n",
   " 222:	4660      	mov	r0, ip\n",
-  " 224:	f8d9 c2d4 	ldr.w	ip, [r9, #724]	; 0x2d4\n",
+  " 224:	f8d9 c2dc 	ldr.w	ip, [r9, #732]	; 0x2dc\n",
   " 228:	47e0      	blx	ip\n",
   nullptr
 };
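
Both expected-output updates shift a Thread-relative (r9) load by 8 bytes (0x94 - 0x8c = 0x2dc - 0x2d4 = 8), consistent with one pointer-sized slot having been inserted ahead of these entrypoint offsets; the header change responsible is outside this diff (assumption based on the deltas alone).
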
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 6f53861..fbad1af 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -634,7 +634,9 @@
                        const std::string& dex_location,
                        size_t num_classes,
                        uint32_t checksum) {
-    int profile_test_fd = open(test_profile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
+    int profile_test_fd = open(test_profile.c_str(),
+                               O_CREAT | O_TRUNC | O_WRONLY | O_CLOEXEC,
+                               0644);
     CHECK_GE(profile_test_fd, 0);
 
     ProfileCompilationInfo info;
@@ -1698,7 +1700,7 @@
   // Create a multidex file with only one dex that gets rejected for cdex conversion.
   ScratchFile apk_file;
   {
-    FILE* file = fdopen(dup(apk_file.GetFd()), "w+b");
+    FILE* file = fdopen(DupCloexec(apk_file.GetFd()), "w+b");
     ZipWriter writer(file);
     // Add vdex to zip.
     writer.StartEntry("classes.dex", ZipWriter::kCompress);
@@ -1837,7 +1839,7 @@
     std::unique_ptr<File> vdex_file(OS::OpenFileForReading(vdex_location.c_str()));
     ASSERT_TRUE(vdex_file != nullptr);
     ASSERT_GT(vdex_file->GetLength(), 0u);
-    FILE* file = fdopen(dup(dm_file.GetFd()), "w+b");
+    FILE* file = fdopen(DupCloexec(dm_file.GetFd()), "w+b");
     ZipWriter writer(file);
     auto write_all_bytes = [&](File* file) {
       std::unique_ptr<uint8_t[]> bytes(new uint8_t[file->GetLength()]);
@@ -1963,7 +1965,7 @@
 TEST_F(Dex2oatTest, CompactDexInvalidSource) {
   ScratchFile invalid_dex;
   {
-    FILE* file = fdopen(dup(invalid_dex.GetFd()), "w+b");
+    FILE* file = fdopen(DupCloexec(invalid_dex.GetFd()), "w+b");
     ZipWriter writer(file);
     writer.StartEntry("classes.dex", ZipWriter::kAlign32);
     DexFile::Header header = {};
@@ -2005,7 +2007,7 @@
   // Create a zip containing the invalid dex.
   ScratchFile invalid_dex_zip;
   {
-    FILE* file = fdopen(dup(invalid_dex_zip.GetFd()), "w+b");
+    FILE* file = fdopen(DupCloexec(invalid_dex_zip.GetFd()), "w+b");
     ZipWriter writer(file);
     writer.StartEntry("classes.dex", ZipWriter::kCompress);
     ASSERT_GE(writer.WriteBytes(&header, sizeof(header)), 0);
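
The fdopen() call sites duplicate the descriptor first because the resulting stream owns whatever fd it is given: fclose() closes it. Duplicating keeps the ScratchFile's own descriptor valid after the stream is torn down. A minimal sketch of the pattern, with a hypothetical payload:

    #include <cstdio>

    #include "base/file_utils.h"  // art::DupCloexec

    // scratch_fd remains open after the stream is closed, because the
    // stream owns only the duplicate descriptor.
    void WriteThroughStream(int scratch_fd) {
      FILE* file = fdopen(art::DupCloexec(scratch_fd), "w+b");
      if (file == nullptr) {
        return;
      }
      fputs("payload", file);  // hypothetical content
      fclose(file);            // closes the duplicate only
    }
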
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 6410c7a..ddc9f43 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -27,7 +27,6 @@
 
 #include "art_field-inl.h"
 #include "art_method-inl.h"
-#include "base/bit_memory_region.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
 #include "base/globals.h"
@@ -88,14 +87,6 @@
 namespace art {
 namespace linker {
 
-static inline size_t RelocationIndex(size_t relocation_offset, PointerSize target_ptr_size) {
-  static_assert(sizeof(GcRoot<mirror::Object>) == sizeof(mirror::HeapReference<mirror::Object>),
-                "Expecting heap GC roots and references to have the same size.");
-  DCHECK_LE(sizeof(GcRoot<mirror::Object>), static_cast<size_t>(target_ptr_size));
-  DCHECK_ALIGNED(relocation_offset, sizeof(GcRoot<mirror::Object>));
-  return relocation_offset / sizeof(GcRoot<mirror::Object>);
-}
-
 static ArrayRef<const uint8_t> MaybeCompressData(ArrayRef<const uint8_t> source,
                                                  ImageHeader::StorageMode image_storage_mode,
                                                  /*out*/ std::vector<uint8_t>* storage) {
@@ -672,22 +663,6 @@
       return false;
     }
 
-    // Write out relocations.
-    size_t relocations_position_in_file = bitmap_position_in_file + bitmap_section.Size();
-    ArrayRef<const uint8_t> relocations = MaybeCompressData(
-        ArrayRef<const uint8_t>(image_info.relocation_bitmap_),
-        image_storage_mode_,
-        &compressed_data);
-    image_header->sections_[ImageHeader::kSectionImageRelocations] =
-        ImageSection(bitmap_section.Offset() + bitmap_section.Size(), relocations.size());
-    if (!image_file->PwriteFully(relocations.data(),
-                                 relocations.size(),
-                                 relocations_position_in_file)) {
-      PLOG(ERROR) << "Failed to write image file relocations " << image_filename;
-      image_file->Erase();
-      return false;
-    }
-
     int err = image_file->Flush();
     if (err < 0) {
       PLOG(ERROR) << "Failed to flush image file " << image_filename << " with result " << err;
@@ -708,9 +683,7 @@
     }
 
     if (VLOG_IS_ON(compiler)) {
-      size_t separately_written_section_size = bitmap_section.Size() +
-                                               image_header->GetImageRelocationsSection().Size() +
-                                               sizeof(ImageHeader);
+      size_t separately_written_section_size = bitmap_section.Size() + sizeof(ImageHeader);
 
       size_t total_uncompressed_size = raw_image_data.size() + separately_written_section_size,
              total_compressed_size   = image_data.size() + separately_written_section_size;
@@ -721,7 +694,7 @@
       }
     }
 
-    CHECK_EQ(relocations_position_in_file + relocations.size(),
+    CHECK_EQ(bitmap_position_in_file + bitmap_section.Size(),
              static_cast<size_t>(image_file->GetLength()));
 
     if (image_file->FlushCloseOrErase() != 0) {
@@ -2366,8 +2339,6 @@
   const size_t bitmap_bytes = image_info.image_bitmap_->Size();
   auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
   *bitmap_section = ImageSection(RoundUp(image_end, kPageSize), RoundUp(bitmap_bytes, kPageSize));
-  // The relocations section shall be finished later as we do not know its actual size yet.
-
   if (VLOG_IS_ON(compiler)) {
     LOG(INFO) << "Creating header for " << oat_filenames_[oat_index];
     size_t idx = 0;
@@ -2394,7 +2365,7 @@
 
   // Create the header, leave 0 for data size since we will fill this in as we are writing the
   // image.
-  ImageHeader* header = new (image_info.image_.Begin()) ImageHeader(
+  new (image_info.image_.Begin()) ImageHeader(
       PointerToLowMemUInt32(image_info.image_begin_),
       image_end,
       sections.data(),
@@ -2411,28 +2382,6 @@
       static_cast<uint32_t>(target_ptr_size_),
       image_storage_mode_,
       /*data_size*/0u);
-
-  // Resize relocation bitmap for recording reference/pointer relocations.
-  size_t number_of_relocation_locations = RelocationIndex(image_end, target_ptr_size_);
-  DCHECK(image_info.relocation_bitmap_.empty());
-  image_info.relocation_bitmap_.resize(
-      BitsToBytesRoundUp(number_of_relocation_locations * (compile_app_image_ ? 2u : 1u)));
-  // Record header relocations.
-  RecordImageRelocation(&header->image_begin_, oat_index);
-  RecordImageRelocation(&header->oat_file_begin_, oat_index);
-  RecordImageRelocation(&header->oat_data_begin_, oat_index);
-  RecordImageRelocation(&header->oat_data_end_, oat_index);
-  RecordImageRelocation(&header->oat_file_end_, oat_index);
-  if (compile_app_image_) {
-    RecordImageRelocation(&header->boot_image_begin_, oat_index, /* app_to_boot_image */ true);
-    RecordImageRelocation(&header->boot_oat_begin_, oat_index, /* app_to_boot_image */ true);
-  } else {
-    DCHECK_EQ(header->boot_image_begin_, 0u);
-    DCHECK_EQ(header->boot_oat_begin_, 0u);
-  }
-  RecordImageRelocation(&header->image_roots_, oat_index);
-  // Skip non-null check for `patch_delta_` as it is actually 0 but still needs to be recorded.
-  RecordImageRelocation</* kCheckNotNull */ false>(&header->patch_delta_, oat_index);
 }
 
 ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) {
@@ -2492,28 +2441,23 @@
   ImageWriter* const image_writer_;
 };
 
-void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy, size_t oat_index) {
+void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy) {
   for (size_t i = 0; i < ImTable::kSize; ++i) {
     ArtMethod* method = orig->Get(i, target_ptr_size_);
     void** address = reinterpret_cast<void**>(copy->AddressOfElement(i, target_ptr_size_));
-    CopyAndFixupPointer(address, method, oat_index);
+    CopyAndFixupPointer(address, method);
     DCHECK_EQ(copy->Get(i, target_ptr_size_), NativeLocationInImage(method));
   }
 }
 
-void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig,
-                                               ImtConflictTable* copy,
-                                               size_t oat_index) {
+void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) {
   const size_t count = orig->NumEntries(target_ptr_size_);
   for (size_t i = 0; i < count; ++i) {
     ArtMethod* interface_method = orig->GetInterfaceMethod(i, target_ptr_size_);
     ArtMethod* implementation_method = orig->GetImplementationMethod(i, target_ptr_size_);
-    CopyAndFixupPointer(copy->AddressOfInterfaceMethod(i, target_ptr_size_),
-                        interface_method,
-                        oat_index);
-    CopyAndFixupPointer(copy->AddressOfImplementationMethod(i, target_ptr_size_),
-                        implementation_method,
-                        oat_index);
+    CopyAndFixupPointer(copy->AddressOfInterfaceMethod(i, target_ptr_size_), interface_method);
+    CopyAndFixupPointer(
+        copy->AddressOfImplementationMethod(i, target_ptr_size_), implementation_method);
     DCHECK_EQ(copy->GetInterfaceMethod(i, target_ptr_size_),
               NativeLocationInImage(interface_method));
     DCHECK_EQ(copy->GetImplementationMethod(i, target_ptr_size_),
@@ -2538,8 +2482,7 @@
         memcpy(dest, pair.first, sizeof(ArtField));
         CopyAndFixupReference(
             reinterpret_cast<ArtField*>(dest)->GetDeclaringClassAddressWithoutBarrier(),
-            reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass(),
-            oat_index);
+            reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass());
         break;
       }
       case NativeObjectRelocationType::kRuntimeMethod:
@@ -2572,15 +2515,14 @@
       case NativeObjectRelocationType::kIMTable: {
         ImTable* orig_imt = reinterpret_cast<ImTable*>(pair.first);
         ImTable* dest_imt = reinterpret_cast<ImTable*>(dest);
-        CopyAndFixupImTable(orig_imt, dest_imt, oat_index);
+        CopyAndFixupImTable(orig_imt, dest_imt);
         break;
       }
       case NativeObjectRelocationType::kIMTConflictTable: {
         auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first);
         CopyAndFixupImtConflictTable(
             orig_table,
-            new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_),
-            oat_index);
+            new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_));
         break;
       }
     }
@@ -2590,10 +2532,8 @@
   for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
     ArtMethod* method = image_methods_[i];
     CHECK(method != nullptr);
-    CopyAndFixupPointer(reinterpret_cast<void**>(&image_header->image_methods_[i]),
-                        method,
-                        oat_index,
-                        PointerSize::k32);
+    CopyAndFixupPointer(
+        reinterpret_cast<void**>(&image_header->image_methods_[i]), method, PointerSize::k32);
   }
   FixupRootVisitor root_visitor(this);
 
@@ -2618,9 +2558,6 @@
     MutexLock lock(Thread::Current(), *Locks::intern_table_lock_);
     DCHECK(!temp_intern_table.strong_interns_.tables_.empty());
     DCHECK(!temp_intern_table.strong_interns_.tables_[0].empty());  // Inserted at the beginning.
-    for (const GcRoot<mirror::String>& slot : temp_intern_table.strong_interns_.tables_[0]) {
-      RecordImageRelocation(&slot, oat_index);
-    }
   }
   // Write the class table(s) into the image. class_table_bytes_ may be 0 if there are multiple
   // class loaders. Writing multiple class tables into the image is currently unsupported.
@@ -2649,9 +2586,6 @@
     ReaderMutexLock lock(self, temp_class_table.lock_);
     DCHECK(!temp_class_table.classes_.empty());
     DCHECK(!temp_class_table.classes_[0].empty());  // The ClassSet was inserted at the beginning.
-    for (const ClassTable::TableSlot& slot : temp_class_table.classes_[0]) {
-      RecordImageRelocation(&slot, oat_index);
-    }
   }
 }
 
@@ -2668,15 +2602,13 @@
 void ImageWriter::FixupPointerArray(mirror::Object* dst,
                                     mirror::PointerArray* arr,
                                     mirror::Class* klass,
-                                    Bin array_type,
-                                    size_t oat_index) {
+                                    Bin array_type) {
   CHECK(klass->IsArrayClass());
   CHECK(arr->IsIntArray() || arr->IsLongArray()) << klass->PrettyClass() << " " << arr;
   // Fixup int and long pointers for the ArtMethod or ArtField arrays.
   const size_t num_elements = arr->GetLength();
-  CopyAndFixupReference(dst->GetFieldObjectReferenceAddr<kVerifyNone>(Class::ClassOffset()),
-                        arr->GetClass(),
-                        oat_index);
+  CopyAndFixupReference(
+      dst->GetFieldObjectReferenceAddr<kVerifyNone>(Class::ClassOffset()), arr->GetClass());
   auto* dest_array = down_cast<mirror::PointerArray*>(dst);
   for (size_t i = 0, count = num_elements; i < count; ++i) {
     void* elem = arr->GetElementPtrSize<void*>(i, target_ptr_size_);
@@ -2698,7 +2630,7 @@
         UNREACHABLE();
       }
     }
-    CopyAndFixupPointer(dest_array->ElementAddress(i, target_ptr_size_), elem, oat_index);
+    CopyAndFixupPointer(dest_array->ElementAddress(i, target_ptr_size_), elem);
   }
 }
 
@@ -2729,14 +2661,14 @@
     // safe since we mark all of the objects that may reference non immune objects as gray.
     CHECK(dst->AtomicSetMarkBit(0, 1));
   }
-  FixupObject(obj, dst, oat_index);
+  FixupObject(obj, dst);
 }
 
 // Rewrite all the references in the copied object to point to their image address equivalent
 class ImageWriter::FixupVisitor {
  public:
-  FixupVisitor(ImageWriter* image_writer, Object* copy, size_t oat_index)
-      : image_writer_(image_writer), copy_(copy), oat_index_(oat_index) {
+  FixupVisitor(ImageWriter* image_writer, Object* copy)
+      : image_writer_(image_writer), copy_(copy) {
   }
 
   // Ignore class roots since we don't have a way to map them to the destination. These are handled
@@ -2751,9 +2683,7 @@
     ObjPtr<Object> ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
     // Copy the reference and record the fixup if necessary.
     image_writer_->CopyAndFixupReference(
-        copy_->GetFieldObjectReferenceAddr<kVerifyNone>(offset),
-        ref.Ptr(),
-        oat_index_);
+        copy_->GetFieldObjectReferenceAddr<kVerifyNone>(offset), ref);
   }
 
   // java.lang.ref.Reference visitor.
@@ -2766,13 +2696,12 @@
  protected:
   ImageWriter* const image_writer_;
   mirror::Object* const copy_;
-  size_t oat_index_;
 };
 
 class ImageWriter::FixupClassVisitor final : public FixupVisitor {
  public:
-  FixupClassVisitor(ImageWriter* image_writer, Object* copy, size_t oat_index)
-      : FixupVisitor(image_writer, copy, oat_index) {}
+  FixupClassVisitor(ImageWriter* image_writer, Object* copy)
+      : FixupVisitor(image_writer, copy) {}
 
   void operator()(ObjPtr<Object> obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
       REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -2828,14 +2757,13 @@
 
 class ImageWriter::NativeLocationVisitor {
  public:
-  NativeLocationVisitor(ImageWriter* image_writer, size_t oat_index)
-      : image_writer_(image_writer),
-        oat_index_(oat_index) {}
+  explicit NativeLocationVisitor(ImageWriter* image_writer)
+      : image_writer_(image_writer) {}
 
   template <typename T>
   T* operator()(T* ptr, void** dest_addr) const REQUIRES_SHARED(Locks::mutator_lock_) {
     if (ptr != nullptr) {
-      image_writer_->CopyAndFixupPointer(dest_addr, ptr, oat_index_);
+      image_writer_->CopyAndFixupPointer(dest_addr, ptr);
     }
     // TODO: The caller shall overwrite the value stored by CopyAndFixupPointer()
     // with the value we return here. We should try to avoid the duplicate work.
@@ -2844,12 +2772,11 @@
 
  private:
   ImageWriter* const image_writer_;
-  const size_t oat_index_;
 };
 
-void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy, size_t oat_index) {
-  orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this, oat_index));
-  FixupClassVisitor visitor(this, copy, oat_index);
+void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) {
+  orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this));
+  FixupClassVisitor visitor(this, copy);
   ObjPtr<mirror::Object>(orig)->VisitReferences(visitor, visitor);
 
   if (kBitstringSubtypeCheckEnabled && compile_app_image_) {
@@ -2877,7 +2804,7 @@
   copy->SetClinitThreadId(static_cast<pid_t>(0));
 }
 
-void ImageWriter::FixupObject(Object* orig, Object* copy, size_t oat_index) {
+void ImageWriter::FixupObject(Object* orig, Object* copy) {
   DCHECK(orig != nullptr);
   DCHECK(copy != nullptr);
   if (kUseBakerReadBarrier) {
@@ -2889,13 +2816,13 @@
     auto it = pointer_arrays_.find(down_cast<mirror::PointerArray*>(orig));
     if (it != pointer_arrays_.end()) {
       // Should only need to fixup every pointer array exactly once.
-      FixupPointerArray(copy, down_cast<mirror::PointerArray*>(orig), klass, it->second, oat_index);
+      FixupPointerArray(copy, down_cast<mirror::PointerArray*>(orig), klass, it->second);
       pointer_arrays_.erase(it);
       return;
     }
   }
   if (orig->IsClass()) {
-    FixupClass(orig->AsClass<kVerifyNone>(), down_cast<mirror::Class*>(copy), oat_index);
+    FixupClass(orig->AsClass<kVerifyNone>(), down_cast<mirror::Class*>(copy));
   } else {
     ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
         Runtime::Current()->GetClassLinker()->GetClassRoots();
@@ -2905,11 +2832,9 @@
       auto* dest = down_cast<mirror::Executable*>(copy);
       auto* src = down_cast<mirror::Executable*>(orig);
       ArtMethod* src_method = src->GetArtMethod();
-      CopyAndFixupPointer(dest, mirror::Executable::ArtMethodOffset(), src_method, oat_index);
+      CopyAndFixupPointer(dest, mirror::Executable::ArtMethodOffset(), src_method);
     } else if (klass == GetClassRoot<mirror::DexCache>(class_roots)) {
-      FixupDexCache(down_cast<mirror::DexCache*>(orig),
-                    down_cast<mirror::DexCache*>(copy),
-                    oat_index);
+      FixupDexCache(down_cast<mirror::DexCache*>(orig), down_cast<mirror::DexCache*>(copy));
     } else if (klass->IsClassLoaderClass()) {
       mirror::ClassLoader* copy_loader = down_cast<mirror::ClassLoader*>(copy);
       // If src is a ClassLoader, set the class table to null so that it gets recreated by the
@@ -2920,7 +2845,7 @@
       // roots.
       copy_loader->SetAllocator(nullptr);
     }
-    FixupVisitor visitor(this, copy, oat_index);
+    FixupVisitor visitor(this, copy);
     orig->VisitReferences(visitor, visitor);
   }
 }
@@ -2928,8 +2853,7 @@
 template <typename T>
 void ImageWriter::FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* orig_array,
                                           std::atomic<mirror::DexCachePair<T>>* new_array,
-                                          uint32_t array_index,
-                                          size_t oat_index) {
+                                          uint32_t array_index) {
   static_assert(sizeof(std::atomic<mirror::DexCachePair<T>>) == sizeof(mirror::DexCachePair<T>),
                 "Size check for removing std::atomic<>.");
   mirror::DexCachePair<T>* orig_pair =
@@ -2937,15 +2861,14 @@
   mirror::DexCachePair<T>* new_pair =
       reinterpret_cast<mirror::DexCachePair<T>*>(&new_array[array_index]);
   CopyAndFixupReference(
-      new_pair->object.AddressWithoutBarrier(), orig_pair->object.Read(), oat_index);
+      new_pair->object.AddressWithoutBarrier(), orig_pair->object.Read());
   new_pair->index = orig_pair->index;
 }
 
 template <typename T>
 void ImageWriter::FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* orig_array,
                                           std::atomic<mirror::NativeDexCachePair<T>>* new_array,
-                                          uint32_t array_index,
-                                          size_t oat_index) {
+                                          uint32_t array_index) {
   static_assert(
       sizeof(std::atomic<mirror::NativeDexCachePair<T>>) == sizeof(mirror::NativeDexCachePair<T>),
       "Size check for removing std::atomic<>.");
@@ -2956,9 +2879,8 @@
         reinterpret_cast<DexCache::ConversionPair64*>(new_array) + array_index;
     *new_pair = *orig_pair;  // Copy original value and index.
     if (orig_pair->first != 0u) {
-      CopyAndFixupPointer(reinterpret_cast<void**>(&new_pair->first),
-                          reinterpret_cast64<void*>(orig_pair->first),
-                          oat_index);
+      CopyAndFixupPointer(
+          reinterpret_cast<void**>(&new_pair->first), reinterpret_cast64<void*>(orig_pair->first));
     }
   } else {
     DexCache::ConversionPair32* orig_pair =
@@ -2967,26 +2889,22 @@
         reinterpret_cast<DexCache::ConversionPair32*>(new_array) + array_index;
     *new_pair = *orig_pair;  // Copy original value and index.
     if (orig_pair->first != 0u) {
-      CopyAndFixupPointer(reinterpret_cast<void**>(&new_pair->first),
-                          reinterpret_cast32<void*>(orig_pair->first),
-                          oat_index);
+      CopyAndFixupPointer(
+          reinterpret_cast<void**>(&new_pair->first), reinterpret_cast32<void*>(orig_pair->first));
     }
   }
 }
 
 void ImageWriter::FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* orig_array,
                                           GcRoot<mirror::CallSite>* new_array,
-                                          uint32_t array_index,
-                                          size_t oat_index) {
-  CopyAndFixupReference(new_array[array_index].AddressWithoutBarrier(),
-                        orig_array[array_index].Read(),
-                        oat_index);
+                                          uint32_t array_index) {
+  CopyAndFixupReference(
+      new_array[array_index].AddressWithoutBarrier(), orig_array[array_index].Read());
 }
 
 template <typename EntryType>
 void ImageWriter::FixupDexCacheArray(DexCache* orig_dex_cache,
                                      DexCache* copy_dex_cache,
-                                     size_t oat_index,
                                      MemberOffset array_offset,
                                      uint32_t size) {
   EntryType* orig_array = orig_dex_cache->GetFieldPtr64<EntryType*>(array_offset);
@@ -2994,45 +2912,37 @@
   if (orig_array != nullptr) {
     // Though the DexCache array fields are usually treated as native pointers, we clear
     // the top 32 bits for 32-bit targets.
-    CopyAndFixupPointer(copy_dex_cache, array_offset, orig_array, oat_index, PointerSize::k64);
+    CopyAndFixupPointer(copy_dex_cache, array_offset, orig_array, PointerSize::k64);
     EntryType* new_array = NativeCopyLocation(orig_array);
     for (uint32_t i = 0; i != size; ++i) {
-      FixupDexCacheArrayEntry(orig_array, new_array, i, oat_index);
+      FixupDexCacheArrayEntry(orig_array, new_array, i);
     }
   }
 }
 
-void ImageWriter::FixupDexCache(DexCache* orig_dex_cache,
-                                DexCache* copy_dex_cache,
-                                size_t oat_index) {
+void ImageWriter::FixupDexCache(DexCache* orig_dex_cache, DexCache* copy_dex_cache) {
   FixupDexCacheArray<mirror::StringDexCacheType>(orig_dex_cache,
                                                  copy_dex_cache,
-                                                 oat_index,
                                                  DexCache::StringsOffset(),
                                                  orig_dex_cache->NumStrings());
   FixupDexCacheArray<mirror::TypeDexCacheType>(orig_dex_cache,
                                                copy_dex_cache,
-                                               oat_index,
                                                DexCache::ResolvedTypesOffset(),
                                                orig_dex_cache->NumResolvedTypes());
   FixupDexCacheArray<mirror::MethodDexCacheType>(orig_dex_cache,
                                                  copy_dex_cache,
-                                                 oat_index,
                                                  DexCache::ResolvedMethodsOffset(),
                                                  orig_dex_cache->NumResolvedMethods());
   FixupDexCacheArray<mirror::FieldDexCacheType>(orig_dex_cache,
                                                 copy_dex_cache,
-                                                oat_index,
                                                 DexCache::ResolvedFieldsOffset(),
                                                 orig_dex_cache->NumResolvedFields());
   FixupDexCacheArray<mirror::MethodTypeDexCacheType>(orig_dex_cache,
                                                      copy_dex_cache,
-                                                     oat_index,
                                                      DexCache::ResolvedMethodTypesOffset(),
                                                      orig_dex_cache->NumResolvedMethodTypes());
   FixupDexCacheArray<GcRoot<mirror::CallSite>>(orig_dex_cache,
                                                copy_dex_cache,
-                                               oat_index,
                                                DexCache::ResolvedCallSitesOffset(),
                                                orig_dex_cache->NumResolvedCallSites());
 
@@ -3141,9 +3051,8 @@
 
   memcpy(copy, orig, ArtMethod::Size(target_ptr_size_));
 
-  CopyAndFixupReference(copy->GetDeclaringClassAddressWithoutBarrier(),
-                        orig->GetDeclaringClassUnchecked(),
-                        oat_index);
+  CopyAndFixupReference(
+      copy->GetDeclaringClassAddressWithoutBarrier(), orig->GetDeclaringClassUnchecked());
 
   // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
   // oat_begin_
@@ -3156,7 +3065,7 @@
     if (orig_table != nullptr) {
       // Special IMT conflict method, normal IMT conflict method or unimplemented IMT method.
       quick_code = GetOatAddress(StubType::kQuickIMTConflictTrampoline);
-      CopyAndFixupPointer(copy, ArtMethod::DataOffset(target_ptr_size_), orig_table, oat_index);
+      CopyAndFixupPointer(copy, ArtMethod::DataOffset(target_ptr_size_), orig_table);
     } else if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
       quick_code = GetOatAddress(StubType::kQuickResolutionTrampoline);
     } else {
@@ -3190,9 +3099,6 @@
         // Note this is not the code_ pointer, that is handled above.
         copy->SetEntryPointFromJniPtrSize(
             GetOatAddress(StubType::kJNIDlsymLookup), target_ptr_size_);
-        MemberOffset offset = ArtMethod::EntryPointFromJniOffset(target_ptr_size_);
-        const void* dest = reinterpret_cast<const uint8_t*>(copy) + offset.Uint32Value();
-        RecordImageRelocation(dest, oat_index, /* app_to_boot_image */ compile_app_image_);
       } else {
         CHECK(copy->GetDataPtrSize(target_ptr_size_) == nullptr);
       }
@@ -3200,9 +3106,6 @@
   }
   if (quick_code != nullptr) {
     copy->SetEntryPointFromQuickCompiledCodePtrSize(quick_code, target_ptr_size_);
-    MemberOffset offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(target_ptr_size_);
-    const void* dest = reinterpret_cast<const uint8_t*>(copy) + offset.Uint32Value();
-    RecordImageRelocation(dest, oat_index, /* app_to_boot_image */ IsInBootOatFile(quick_code));
   }
 }
 
@@ -3366,55 +3269,15 @@
     : intern_table_(new InternTable),
       class_table_(new ClassTable) {}
 
-template <bool kCheckNotNull /* = true */>
-void ImageWriter::RecordImageRelocation(const void* dest,
-                                        size_t oat_index,
-                                        bool app_to_boot_image /* = false */) {
-  // Check that we're not recording a relocation for null.
-  if (kCheckNotNull) {
-    DCHECK(reinterpret_cast<const uint32_t*>(dest)[0] != 0u);
-  }
-  // Calculate the offset within the image.
-  ImageInfo* image_info = &image_infos_[oat_index];
-  DCHECK(image_info->image_.HasAddress(dest))
-      << "MemMap range " << static_cast<const void*>(image_info->image_.Begin())
-      << "-" << static_cast<const void*>(image_info->image_.End())
-      << " does not contain " << dest;
-  size_t offset = reinterpret_cast<const uint8_t*>(dest) - image_info->image_.Begin();
-  ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info->image_.Begin());
-  size_t image_end = image_header->GetClassTableSection().End();
-  DCHECK_LT(offset, image_end);
-  // Calculate the location index.
-  size_t size = RelocationIndex(image_end, target_ptr_size_);
-  size_t index = RelocationIndex(offset, target_ptr_size_);
-  if (app_to_boot_image) {
-    index += size;
-  }
-  // Mark the location in the bitmap.
-  DCHECK(compile_app_image_ || !app_to_boot_image);
-  MemoryRegion region(image_info->relocation_bitmap_.data(), image_info->relocation_bitmap_.size());
-  BitMemoryRegion bit_region(region, /* bit_offset */ 0u, compile_app_image_ ? 2u * size : size);
-  DCHECK(!bit_region.LoadBit(index));
-  bit_region.StoreBit(index, /* value*/ true);
-}
-
 template <typename DestType>
-void ImageWriter::CopyAndFixupReference(DestType* dest,
-                                        ObjPtr<mirror::Object> src,
-                                        size_t oat_index) {
+void ImageWriter::CopyAndFixupReference(DestType* dest, ObjPtr<mirror::Object> src) {
   static_assert(std::is_same<DestType, mirror::CompressedReference<mirror::Object>>::value ||
                     std::is_same<DestType, mirror::HeapReference<mirror::Object>>::value,
                 "DestType must be a Compressed-/HeapReference<Object>.");
   dest->Assign(GetImageAddress(src.Ptr()));
-  if (src != nullptr) {
-    RecordImageRelocation(dest, oat_index, /* app_to_boot_image */ IsInBootImage(src.Ptr()));
-  }
 }
 
-void ImageWriter::CopyAndFixupPointer(void** target,
-                                      void* value,
-                                      size_t oat_index,
-                                      PointerSize pointer_size) {
+void ImageWriter::CopyAndFixupPointer(void** target, void* value, PointerSize pointer_size) {
   void* new_value = NativeLocationInImage(value);
   if (pointer_size == PointerSize::k32) {
     *reinterpret_cast<uint32_t*>(target) = reinterpret_cast32<uint32_t>(new_value);
@@ -3422,24 +3285,22 @@
     *reinterpret_cast<uint64_t*>(target) = reinterpret_cast64<uint64_t>(new_value);
   }
   DCHECK(value != nullptr);
-  RecordImageRelocation(target, oat_index, /* app_to_boot_image */ IsInBootImage(value));
 }
 
-void ImageWriter::CopyAndFixupPointer(void** target, void* value, size_t oat_index)
+void ImageWriter::CopyAndFixupPointer(void** target, void* value)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  CopyAndFixupPointer(target, value, oat_index, target_ptr_size_);
+  CopyAndFixupPointer(target, value, target_ptr_size_);
 }
 
 void ImageWriter::CopyAndFixupPointer(
-    void* object, MemberOffset offset, void* value, size_t oat_index, PointerSize pointer_size) {
+    void* object, MemberOffset offset, void* value, PointerSize pointer_size) {
   void** target =
       reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(object) + offset.Uint32Value());
-  return CopyAndFixupPointer(target, value, oat_index, pointer_size);
+  return CopyAndFixupPointer(target, value, pointer_size);
 }
 
-void ImageWriter::CopyAndFixupPointer(
-    void* object, MemberOffset offset, void* value, size_t oat_index) {
-  return CopyAndFixupPointer(object, offset, value, oat_index, target_ptr_size_);
+void ImageWriter::CopyAndFixupPointer(void* object, MemberOffset offset, void* value) {
+  return CopyAndFixupPointer(object, offset, value, target_ptr_size_);
 }
 
 }  // namespace linker
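
For reference, the deleted bookkeeping kept one bit per reference-sized slot of the image, and oat_index existed only to locate the right relocation_bitmap_; with it gone, the parameter drops out of every Fixup/CopyAndFixup helper. A worked example of the removed RelocationIndex() arithmetic, assuming a 32-bit target where sizeof(GcRoot<mirror::Object>) == 4:

    #include <cstddef>

    constexpr size_t kSlotSize = 4;  // sizeof(GcRoot<mirror::Object>), 32-bit target
    constexpr size_t RelocationIndex(size_t offset) { return offset / kSlotSize; }
    // A reference at image offset 0x8c was recorded as bit 35
    // (byte 4, bit 3) of relocation_bitmap_.
    static_assert(RelocationIndex(0x8c) == 35, "offset 0x8c -> bit 35");
    // App images doubled the bitmap: bits [0, size) marked intra-image
    // relocations, bits [size, 2 * size) marked references into the boot image.
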
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 93e4be5..6f43527 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -289,7 +289,7 @@
      * Creates ImageSection objects that describe most of the sections of a
      * boot or AppImage.  The following sections are not included:
      *   - ImageHeader::kSectionImageBitmap
-     *   - ImageHeader::kSectionImageRelocations
+     *   - ImageHeader::kSectionStringReferenceOffsets
      *
      * In addition, the ImageHeader is not covered here.
      *
@@ -397,12 +397,6 @@
 
     // Class table associated with this image for serialization.
     std::unique_ptr<ClassTable> class_table_;
-
-    // Relocations of references/pointers. For boot image, it contains one bit
-    // for each location that can be relocated. For app image, it contains twice
-    // that many bits, first half contains relocations within this image and the
-    // second half contains relocations for references to the boot image.
-    std::vector<uint8_t> relocation_bitmap_;
   };
 
   // We use the lock word to store the offset of the object in the image.
@@ -496,11 +490,9 @@
   void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
   void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, size_t oat_index)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void CopyAndFixupImTable(ImTable* orig, ImTable* copy, size_t oat_index)
+  void CopyAndFixupImTable(ImTable* orig, ImTable* copy)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void CopyAndFixupImtConflictTable(ImtConflictTable* orig,
-                                    ImtConflictTable* copy,
-                                    size_t oat_index)
+  void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   /*
@@ -517,45 +509,37 @@
    */
   void CopyMetadata();
 
-  template <bool kCheckNotNull = true>
-  void RecordImageRelocation(const void* dest, size_t oat_index, bool app_to_boot_image = false);
-  void FixupClass(mirror::Class* orig, mirror::Class* copy, size_t oat_index)
+  void FixupClass(mirror::Class* orig, mirror::Class* copy)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void FixupObject(mirror::Object* orig, mirror::Object* copy, size_t oat_index)
+  void FixupObject(mirror::Object* orig, mirror::Object* copy)
       REQUIRES_SHARED(Locks::mutator_lock_);
   template <typename T>
   void FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* orig_array,
                                std::atomic<mirror::DexCachePair<T>>* new_array,
-                               uint32_t array_index,
-                               size_t oat_index)
+                               uint32_t array_index)
       REQUIRES_SHARED(Locks::mutator_lock_);
   template <typename T>
   void FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* orig_array,
                                std::atomic<mirror::NativeDexCachePair<T>>* new_array,
-                               uint32_t array_index,
-                               size_t oat_index)
+                               uint32_t array_index)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* orig_array,
                                GcRoot<mirror::CallSite>* new_array,
-                               uint32_t array_index,
-                               size_t oat_index)
+                               uint32_t array_index)
       REQUIRES_SHARED(Locks::mutator_lock_);
   template <typename EntryType>
   void FixupDexCacheArray(mirror::DexCache* orig_dex_cache,
                           mirror::DexCache* copy_dex_cache,
-                          size_t oat_index,
                           MemberOffset array_offset,
                           uint32_t size)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void FixupDexCache(mirror::DexCache* orig_dex_cache,
-                     mirror::DexCache* copy_dex_cache,
-                     size_t oat_index)
+                     mirror::DexCache* copy_dex_cache)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void FixupPointerArray(mirror::Object* dst,
                          mirror::PointerArray* arr,
                          mirror::Class* klass,
-                         Bin array_type,
-                         size_t oat_index)
+                         Bin array_type)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get quick code for non-resolution/imt_conflict/abstract method.
@@ -711,18 +695,18 @@
 
   // Copy a reference and record image relocation.
   template <typename DestType>
-  void CopyAndFixupReference(DestType* dest, ObjPtr<mirror::Object> src, size_t oat_index)
+  void CopyAndFixupReference(DestType* dest, ObjPtr<mirror::Object> src)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Copy a native pointer and record image relocation.
-  void CopyAndFixupPointer(void** target, void* value, size_t oat_index, PointerSize pointer_size)
+  void CopyAndFixupPointer(void** target, void* value, PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void CopyAndFixupPointer(void** target, void* value, size_t oat_index)
+  void CopyAndFixupPointer(void** target, void* value)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void CopyAndFixupPointer(
-      void* object, MemberOffset offset, void* value, size_t oat_index, PointerSize pointer_size)
+      void* object, MemberOffset offset, void* value, PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void CopyAndFixupPointer(void* object, MemberOffset offset, void* value, size_t oat_index)
+  void CopyAndFixupPointer(void* object, MemberOffset offset, void* value)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   /*
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 750619d..01c24fc 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -26,6 +26,7 @@
 #include "base/bit_vector-inl.h"
 #include "base/enums.h"
 #include "base/file_magic.h"
+#include "base/file_utils.h"
 #include "base/indenter.h"
 #include "base/logging.h"  // For VLOG
 #include "base/os.h"
@@ -3425,7 +3426,7 @@
                                     &error_msg);
   } else if (oat_dex_file->source_.IsRawFile()) {
     File* raw_file = oat_dex_file->source_.GetRawFile();
-    int dup_fd = dup(raw_file->Fd());
+    int dup_fd = DupCloexec(raw_file->Fd());
     if (dup_fd < 0) {
       PLOG(ERROR) << "Failed to dup dex file descriptor (" << raw_file->Fd() << ") at " << location;
       return false;
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index 7382208..83fb17c 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -19,6 +19,7 @@
 #include "arch/instruction_set_features.h"
 #include "art_method-inl.h"
 #include "base/enums.h"
+#include "base/file_utils.h"
 #include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
 #include "class_linker.h"
@@ -765,7 +766,7 @@
 
   {
     // Test using the AddZipDexFileSource() interface with the zip file handle.
-    File zip_fd(dup(zip_file.GetFd()), /*check_usage=*/ false);
+    File zip_fd(DupCloexec(zip_file.GetFd()), /*check_usage=*/ false);
     ASSERT_NE(-1, zip_fd.Fd());
 
     ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
diff --git a/dexlayout/dex_visualize.cc b/dexlayout/dex_visualize.cc
index 4a36744..27cec8d9 100644
--- a/dexlayout/dex_visualize.cc
+++ b/dexlayout/dex_visualize.cc
@@ -53,7 +53,7 @@
 
   bool OpenAndPrintHeader(size_t dex_index) {
     // Open the file and emit the gnuplot prologue.
-    out_file_ = fopen(MultidexName("layout", dex_index, ".gnuplot").c_str(), "w");
+    out_file_ = fopen(MultidexName("layout", dex_index, ".gnuplot").c_str(), "we");
     if (out_file_ == nullptr) {
       return false;
     }
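
The trailing "e" in the fopen() mode is the glibc/bionic extension that opens the underlying descriptor with O_CLOEXEC — the stdio counterpart of the open() changes elsewhere in this patch. A quick check (hypothetical path):

    #include <cstdio>
    #include <fcntl.h>
    #include <unistd.h>

    int main() {
      FILE* f = fopen("/tmp/demo.gnuplot", "we");
      if (f == nullptr) {
        return 1;
      }
      // FD_CLOEXEC is set on the stream's descriptor.
      int flags = fcntl(fileno(f), F_GETFD);
      printf("cloexec=%d\n", (flags & FD_CLOEXEC) != 0);
      fclose(f);
      return 0;
    }
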
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index d212e71..41b60da 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -190,7 +190,7 @@
   // Open profile file.
   std::unique_ptr<ProfileCompilationInfo> profile_info;
   if (options.profile_file_name_) {
-    int profile_fd = open(options.profile_file_name_, O_RDONLY);
+    int profile_fd = open(options.profile_file_name_, O_RDONLY | O_CLOEXEC);
     if (profile_fd < 0) {
       PLOG(ERROR) << "Can't open " << options.profile_file_name_;
       return 1;
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index d3e4633..bdf3ca6 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -257,7 +257,7 @@
 
   // Open alternative output file.
   if (gOptions.outputFileName) {
-    gOutFile = fopen(gOptions.outputFileName, "w");
+    gOutFile = fopen(gOptions.outputFileName, "we");
     if (!gOutFile) {
       PLOG(ERROR) << "Can't open " << gOptions.outputFileName;
       free(gOptions.argCopy);
diff --git a/dt_fd_forward/dt_fd_forward.cc b/dt_fd_forward/dt_fd_forward.cc
index 116cdf8..a99f785 100644
--- a/dt_fd_forward/dt_fd_forward.cc
+++ b/dt_fd_forward/dt_fd_forward.cc
@@ -105,12 +105,21 @@
   TEMP_FAILURE_RETRY(send(fd, kListenStartMessage, sizeof(kListenStartMessage), MSG_EOR));
 }
 
+// Copy from file_utils, so we do not need to depend on libartbase.
+static int DupCloexec(int fd) {
+#if defined(__linux__)
+  return fcntl(fd, F_DUPFD_CLOEXEC, 0);
+#else
+  return dup(fd);
+#endif
+}
+
 jdwpTransportError FdForwardTransport::SetupListen(int listen_fd) {
   std::lock_guard<std::mutex> lk(state_mutex_);
   if (!ChangeState(TransportState::kClosed, TransportState::kListenSetup)) {
     return ERR(ILLEGAL_STATE);
   } else {
-    listen_fd_.reset(dup(listen_fd));
+    listen_fd_.reset(DupCloexec(listen_fd));
     SendListenMessage(listen_fd_);
     CHECK(ChangeState(TransportState::kListenSetup, TransportState::kListening));
     return OK;
@@ -339,7 +348,7 @@
   write_lock_fd_.reset(out_fds.write_lock_fd_);
 
   // We got the fds. Send ack.
-  close_notify_fd_.reset(dup(listen_fd_));
+  close_notify_fd_.reset(DupCloexec(listen_fd_));
   SendAcceptMessage(close_notify_fd_);
 
   return IOResult::kOk;
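
The point of F_DUPFD_CLOEXEC over a dup()-then-fcntl() sequence is atomicity; the two-step version leaves a race window:

    #include <fcntl.h>
    #include <unistd.h>

    void Compare(int fd) {
      // Racy: a fork()+exec() in another thread between these two calls
      // inherits fd2 without FD_CLOEXEC.
      int fd2 = dup(fd);
      fcntl(fd2, F_SETFD, FD_CLOEXEC);

      // Atomic: the duplicate is created with FD_CLOEXEC already set.
      int fd3 = fcntl(fd, F_DUPFD_CLOEXEC, 0);

      close(fd2);
      close(fd3);
    }
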
diff --git a/libartbase/base/bit_struct.h b/libartbase/base/bit_struct.h
index 9814fd4..292eca0 100644
--- a/libartbase/base/bit_struct.h
+++ b/libartbase/base/bit_struct.h
@@ -274,13 +274,13 @@
 // If a standard-layout union contains several standard-layout structs that share a common
 // initial sequence ... it is permitted to inspect the common initial sequence of any of
 // standard-layout struct members.
-#define BITSTRUCT_DEFINE_START(name, bitwidth)                                 \
-    union name {                                                               \
-      art::detail::DefineBitStructSize<(bitwidth)> _;                          \
-      static constexpr size_t BitStructSizeOf() { return (bitwidth); }         \
-      name& operator=(const name& other) { _ = other._; return *this; }        \
-      name(const name& other) : _(other._) {}                                  \
-      name() = default;                                                        \
+#define BITSTRUCT_DEFINE_START(name, bitwidth)                                        \
+    union name {                                                         /* NOLINT */ \
+      art::detail::DefineBitStructSize<(bitwidth)> _;                                 \
+      static constexpr size_t BitStructSizeOf() { return (bitwidth); }                \
+      name& operator=(const name& other) { _ = other._; return *this; }  /* NOLINT */ \
+      name(const name& other) : _(other._) {}                                         \
+      name() = default;                                                               \
       ~name() = default;
 
 // End the definition of a bitstruct, and insert a sanity check
diff --git a/libartbase/base/common_art_test.cc b/libartbase/base/common_art_test.cc
index 9485fca..987ceb6 100644
--- a/libartbase/base/common_art_test.cc
+++ b/libartbase/base/common_art_test.cc
@@ -62,7 +62,7 @@
     : ScratchFile(other.GetFilename() + suffix) {}
 
 ScratchFile::ScratchFile(const std::string& filename) : filename_(filename) {
-  int fd = open(filename_.c_str(), O_RDWR | O_CREAT, 0666);
+  int fd = open(filename_.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0666);
   CHECK_NE(-1, fd);
   file_.reset(new File(fd, GetFilename(), true));
 }
diff --git a/libartbase/base/membarrier.cc b/libartbase/base/membarrier.cc
index 490dbf3..def949e 100644
--- a/libartbase/base/membarrier.cc
+++ b/libartbase/base/membarrier.cc
@@ -29,7 +29,7 @@
 #include <linux/membarrier.h>
 
 #define CHECK_MEMBARRIER_CMD(art_value, membarrier_value) \
-  static_assert(static_cast<int>(art_value) == membarrier_value, "Bad value for " # art_value)
+  static_assert(static_cast<int>(art_value) == (membarrier_value), "Bad value for " # art_value)
 CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kQuery, MEMBARRIER_CMD_QUERY);
 CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kGlobal, MEMBARRIER_CMD_SHARED);
 CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kPrivateExpedited, MEMBARRIER_CMD_PRIVATE_EXPEDITED);
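
The extra parentheses matter if the expansion of a membarrier constant is ever a compound expression: == binds tighter than |, so the unparenthesized form can compare against only part of the value. A minimal demonstration:

    constexpr int kValue = 2;
    // Unparenthesized: parses as (kValue == 1) | 2, which is nonzero,
    // so a mismatch slips through.
    constexpr bool bad = kValue == 1 | 2;
    // Parenthesized: compares against the whole value; 2 == 3 is false.
    constexpr bool good = kValue == (1 | 2);
    static_assert(bad, "spurious pass without parentheses");
    static_assert(!good, "mismatch caught with parentheses");
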
diff --git a/libartbase/base/scoped_flock.cc b/libartbase/base/scoped_flock.cc
index beee501..2f16fb2 100644
--- a/libartbase/base/scoped_flock.cc
+++ b/libartbase/base/scoped_flock.cc
@@ -22,6 +22,7 @@
 #include <android-base/logging.h>
 #include <android-base/stringprintf.h>
 
+#include "file_utils.h"
 #include "unix_file/fd_file.h"
 
 namespace art {
@@ -98,7 +99,7 @@
   // destructor. Callers should explicitly flush files they're writing to if
   // that is the desired behaviour.
   ScopedFlock locked_file(
-      new LockedFile(dup(fd), path, /* check_usage= */ false, read_only_mode));
+      new LockedFile(DupCloexec(fd), path, /* check_usage= */ false, read_only_mode));
   if (locked_file->Fd() == -1) {
     *error_msg = StringPrintf("Failed to duplicate open file '%s': %s",
                               locked_file->GetPath().c_str(), strerror(errno));
diff --git a/libartbase/base/unix_file/fd_file.cc b/libartbase/base/unix_file/fd_file.cc
index de60277..76894c6 100644
--- a/libartbase/base/unix_file/fd_file.cc
+++ b/libartbase/base/unix_file/fd_file.cc
@@ -431,7 +431,7 @@
   bool is_current = false;
   {
     struct stat this_stat, current_stat;
-    int cur_fd = TEMP_FAILURE_RETRY(open(file_path_.c_str(), O_RDONLY));
+    int cur_fd = TEMP_FAILURE_RETRY(open(file_path_.c_str(), O_RDONLY | O_CLOEXEC));
     if (cur_fd > 0) {
       // File still exists.
       if (fstat(fd_, &this_stat) == 0 && fstat(cur_fd, &current_stat) == 0) {
diff --git a/libartbase/base/unix_file/fd_file_test.cc b/libartbase/base/unix_file/fd_file_test.cc
index 9c39bb5..3a9cf59 100644
--- a/libartbase/base/unix_file/fd_file_test.cc
+++ b/libartbase/base/unix_file/fd_file_test.cc
@@ -15,6 +15,7 @@
  */
 
 #include "base/common_art_test.h"  // For ScratchFile
+#include "base/file_utils.h"
 #include "gtest/gtest.h"
 #include "fd_file.h"
 #include "random_access_file_test.h"
@@ -25,7 +26,7 @@
  protected:
   RandomAccessFile* MakeTestFile() override {
     FILE* tmp = tmpfile();
-    int fd = dup(fileno(tmp));
+    int fd = art::DupCloexec(fileno(tmp));
     fclose(tmp);
     return new FdFile(fd, false);
   }
diff --git a/libartbase/base/zip_archive_test.cc b/libartbase/base/zip_archive_test.cc
index b923881..969cf12 100644
--- a/libartbase/base/zip_archive_test.cc
+++ b/libartbase/base/zip_archive_test.cc
@@ -23,6 +23,7 @@
 #include <memory>
 
 #include "base/common_art_test.h"
+#include "file_utils.h"
 #include "os.h"
 #include "unix_file/fd_file.h"
 
@@ -41,7 +42,7 @@
 
   ScratchFile tmp;
   ASSERT_NE(-1, tmp.GetFd());
-  std::unique_ptr<File> file(new File(dup(tmp.GetFd()), tmp.GetFilename(), false));
+  std::unique_ptr<File> file(new File(DupCloexec(tmp.GetFd()), tmp.GetFilename(), false));
   ASSERT_TRUE(file.get() != nullptr);
   bool success = zip_entry->ExtractToFile(*file, &error_msg);
   ASSERT_TRUE(success) << error_msg;
@@ -49,7 +50,7 @@
   file.reset(nullptr);
 
   uint32_t computed_crc = crc32(0L, Z_NULL, 0);
-  int fd = open(tmp.GetFilename().c_str(), O_RDONLY);
+  int fd = open(tmp.GetFilename().c_str(), O_RDONLY | O_CLOEXEC);
   ASSERT_NE(-1, fd);
   const size_t kBufSize = 32768;
   uint8_t buf[kBufSize];
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index a82fa3f..358b7ba 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1951,7 +1951,6 @@
     const auto& intern_section = image_header_.GetInternedStringsSection();
     const auto& class_table_section = image_header_.GetClassTableSection();
     const auto& bitmap_section = image_header_.GetImageBitmapSection();
-    const auto& relocations_section = image_header_.GetImageRelocationsSection();
 
     stats_.header_bytes = header_bytes;
 
@@ -1991,11 +1990,7 @@
     CHECK_ALIGNED(bitmap_section.Offset(), kPageSize);
     stats_.alignment_bytes += RoundUp(bitmap_offset, kPageSize) - bitmap_offset;
 
-    // There should be no space between the bitmap and relocations.
-    CHECK_EQ(bitmap_section.Offset() + bitmap_section.Size(), relocations_section.Offset());
-
     stats_.bitmap_bytes += bitmap_section.Size();
-    stats_.relocations_bytes += relocations_section.Size();
     stats_.art_field_bytes += field_section.Size();
     stats_.art_method_bytes += method_section.Size();
     stats_.dex_cache_arrays_bytes += dex_cache_arrays_section.Size();
@@ -2428,7 +2423,6 @@
     size_t interned_strings_bytes;
     size_t class_table_bytes;
     size_t bitmap_bytes;
-    size_t relocations_bytes;
     size_t alignment_bytes;
 
     size_t managed_code_bytes;
@@ -2458,7 +2452,6 @@
           interned_strings_bytes(0),
           class_table_bytes(0),
           bitmap_bytes(0),
-          relocations_bytes(0),
           alignment_bytes(0),
           managed_code_bytes(0),
           managed_code_bytes_ignoring_deduplication(0),
@@ -2622,7 +2615,6 @@
                                   "interned_string_bytes  =  %8zd (%2.0f%% of art file bytes)\n"
                                   "class_table_bytes      =  %8zd (%2.0f%% of art file bytes)\n"
                                   "bitmap_bytes           =  %8zd (%2.0f%% of art file bytes)\n"
-                                  "relocations_bytes      =  %8zd (%2.0f%% of art file bytes)\n"
                                   "alignment_bytes        =  %8zd (%2.0f%% of art file bytes)\n\n",
                                   header_bytes, PercentOfFileBytes(header_bytes),
                                   object_bytes, PercentOfFileBytes(object_bytes),
@@ -2634,13 +2626,12 @@
                                   PercentOfFileBytes(interned_strings_bytes),
                                   class_table_bytes, PercentOfFileBytes(class_table_bytes),
                                   bitmap_bytes, PercentOfFileBytes(bitmap_bytes),
-                                  relocations_bytes, PercentOfFileBytes(relocations_bytes),
                                   alignment_bytes, PercentOfFileBytes(alignment_bytes))
             << std::flush;
         CHECK_EQ(file_bytes,
                  header_bytes + object_bytes + art_field_bytes + art_method_bytes +
                  dex_cache_arrays_bytes + interned_strings_bytes + class_table_bytes +
-                 bitmap_bytes + relocations_bytes + alignment_bytes);
+                 bitmap_bytes + alignment_bytes);
       }
 
       os << "object_bytes breakdown:\n";
diff --git a/openjdkjvmti/Android.bp b/openjdkjvmti/Android.bp
index d8902d6..7621d48 100644
--- a/openjdkjvmti/Android.bp
+++ b/openjdkjvmti/Android.bp
@@ -41,6 +41,7 @@
         "ti_field.cc",
         "ti_heap.cc",
         "ti_jni.cc",
+        "ti_logging.cc",
         "ti_method.cc",
         "ti_monitor.cc",
         "ti_object.cc",
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index 4bc33b6..a2fabbf 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -58,6 +58,7 @@
 #include "ti_field.h"
 #include "ti_heap.h"
 #include "ti_jni.h"
+#include "ti_logging.h"
 #include "ti_method.h"
 #include "ti_monitor.h"
 #include "ti_object.h"
@@ -787,7 +788,7 @@
                                                      classes,
                                                      &error_msg);
     if (res != OK) {
-      LOG(WARNING) << "FAILURE TO RETRANFORM " << error_msg;
+      JVMTI_LOG(WARNING, env) << "FAILURE TO RETRANFORM " << error_msg;
     }
     return res;
   }
@@ -806,7 +807,7 @@
                                                 class_definitions,
                                                 &error_msg);
     if (res != OK) {
-      LOG(WARNING) << "FAILURE TO REDEFINE " << error_msg;
+      JVMTI_LOG(WARNING, env) << "FAILURE TO REDEFINE " << error_msg;
     }
     return res;
   }
@@ -1489,7 +1490,8 @@
       local_data(nullptr),
       ti_version(version),
       capabilities(),
-      event_info_mutex_("jvmtiEnv_EventInfoMutex") {
+      event_info_mutex_("jvmtiEnv_EventInfoMutex"),
+      last_error_mutex_("jvmtiEnv_LastErrorMutex", art::LockLevel::kGenericBottomLock) {
   object_tag_table = std::unique_ptr<ObjectTagTable>(new ObjectTagTable(event_handler, this));
   functions = &gJvmtiInterface;
 }
diff --git a/openjdkjvmti/art_jvmti.h b/openjdkjvmti/art_jvmti.h
index 1218e3b..7433e54 100644
--- a/openjdkjvmti/art_jvmti.h
+++ b/openjdkjvmti/art_jvmti.h
@@ -102,6 +102,10 @@
   // RW lock to protect access to all of the event data.
   art::ReaderWriterMutex event_info_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
+  std::string last_error_ GUARDED_BY(last_error_mutex_);
+  // Lock guarding access to the last error message.
+  art::Mutex last_error_mutex_ BOTTOM_MUTEX_ACQUIRED_AFTER;
+
   ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler, jint ti_version);
 
   static ArtJvmTiEnv* AsArtJvmTiEnv(jvmtiEnv* env) {
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index ca66556..8e06fe3 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -25,6 +25,7 @@
 #include "events.h"
 #include "jni/jni_internal.h"
 #include "nativehelper/scoped_local_ref.h"
+#include "runtime-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack.h"
 #include "ti_breakpoint.h"
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index 3d33487..e469270 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -73,6 +73,7 @@
 #include "thread_list.h"
 #include "ti_class_definition.h"
 #include "ti_class_loader-inl.h"
+#include "ti_logging.h"
 #include "ti_phase.h"
 #include "ti_redefine.h"
 #include "transform.h"
@@ -932,8 +933,8 @@
     return ERR(ILLEGAL_ARGUMENT);
   } else if (!jnienv->IsInstanceOf(loader,
                                    art::WellKnownClasses::dalvik_system_BaseDexClassLoader)) {
-    LOG(ERROR) << "GetClassLoaderClassDescriptors is only implemented for BootClassPath and "
-               << "dalvik.system.BaseDexClassLoader class loaders";
+    JVMTI_LOG(ERROR, env) << "GetClassLoaderClassDescriptors is only implemented for "
+                          << "BootClassPath and dalvik.system.BaseDexClassLoader class loaders";
    // TODO Possibly returning OK with no classes would be better since these loaders cannot
    // have any real classes associated with them.
     return ERR(NOT_IMPLEMENTED);
diff --git a/openjdkjvmti/ti_ddms.cc b/openjdkjvmti/ti_ddms.cc
index bf063fa..9de5cbc 100644
--- a/openjdkjvmti/ti_ddms.cc
+++ b/openjdkjvmti/ti_ddms.cc
@@ -39,6 +39,7 @@
 #include "debugger.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-inl.h"
+#include "ti_logging.h"
 
 namespace openjdkjvmti {
 
@@ -69,7 +70,7 @@
                                 data_arr,
                                 /*out*/reinterpret_cast<uint32_t*>(type_out),
                                 /*out*/&out_data)) {
-    LOG(WARNING) << "Something went wrong with handling the ddm chunk.";
+    JVMTI_LOG(WARNING, env) << "Something went wrong with handling the ddm chunk.";
     return ERR(INTERNAL);
   } else {
     jvmtiError error = OK;
diff --git a/openjdkjvmti/ti_extension.cc b/openjdkjvmti/ti_extension.cc
index c628a32..5d39884 100644
--- a/openjdkjvmti/ti_extension.cc
+++ b/openjdkjvmti/ti_extension.cc
@@ -39,7 +39,9 @@
 #include "ti_class.h"
 #include "ti_ddms.h"
 #include "ti_heap.h"
+#include "ti_logging.h"
 #include "ti_monitor.h"
+
 #include "thread-inl.h"
 
 namespace openjdkjvmti {
@@ -272,6 +274,44 @@
   if (error != ERR(NONE)) {
     return error;
   }
+
+  // GetLastError extension
+  error = add_extension(
+      reinterpret_cast<jvmtiExtensionFunction>(LogUtil::GetLastError),
+      "com.android.art.misc.get_last_error_message",
+      "In some cases the jvmti plugin will log data about errors to the android logcat. These can"
+      " be useful to tools, so we make some of the messages available here as well. This will"
+      " fill the given 'msg' buffer with the last non-fatal message associated with this"
+      " jvmti-env. Note this is best-effort only; not all log messages will be accessible through"
+      " this API. This will return the last error message recorded by any thread, so care should"
+      " be taken interpreting the return value when used with a multi-threaded program. The error"
+      " message will only be cleared by a call to 'com.android.art.misc.clear_last_error_message'"
+      " and will not be cleared by intervening successful calls. If no (tracked) error message has"
+      " been recorded since the last call to clear_last_error_message this API will return"
+      " JVMTI_ERROR_ABSENT_INFORMATION. Not all failures will cause an error message to be"
+      " recorded.",
+      {
+          { "msg", JVMTI_KIND_ALLOC_BUF, JVMTI_TYPE_CCHAR, false },
+      },
+      {
+        ERR(NULL_POINTER),
+        ERR(ABSENT_INFORMATION),
+      });
+  if (error != ERR(NONE)) {
+    return error;
+  }
+
+  // ClearLastError extension
+  error = add_extension(
+      reinterpret_cast<jvmtiExtensionFunction>(LogUtil::ClearLastError),
+      "com.android.art.misc.clear_last_error_message",
+      "Clears the error message returned by 'com.android.art.misc.get_last_error_message'.",
+      { },
+      { });
+  if (error != ERR(NONE)) {
+    return error;
+  }
+
   // Copy into output buffer.
 
   *extension_count_ptr = ext_vector.size();
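
For context, this is how an agent might reach the new extension through the
standard jvmtiEnv::GetExtensionFunctions API. This is a sketch rather than
code from this change; the helper name, the abbreviated Deallocate handling,
and the function-pointer cast are assumptions about typical agent code:

    #include <cstring>
    #include <jvmti.h>

    static jvmtiError GetLastErrorMessage(jvmtiEnv* jvmti, char** msg) {
      jint count = 0;
      jvmtiExtensionFunctionInfo* infos = nullptr;
      jvmtiError err = jvmti->GetExtensionFunctions(&count, &infos);
      if (err != JVMTI_ERROR_NONE) {
        return err;
      }
      jvmtiExtensionFunction target = nullptr;
      for (jint i = 0; i < count; i++) {
        if (std::strcmp(infos[i].id, "com.android.art.misc.get_last_error_message") == 0) {
          target = infos[i].func;
        }
        // Each info's id/short_description/params/errors arrays are also
        // Allocate()d and should be Deallocate()d; omitted for brevity.
      }
      jvmti->Deallocate(reinterpret_cast<unsigned char*>(infos));
      if (target == nullptr) {
        return JVMTI_ERROR_NOT_AVAILABLE;
      }
      // An extension function receives the env followed by its declared params.
      auto fn = reinterpret_cast<jvmtiError (*)(jvmtiEnv*, char**)>(target);
      return fn(jvmti, msg);
    }
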
diff --git a/openjdkjvmti/ti_logging.cc b/openjdkjvmti/ti_logging.cc
new file mode 100644
index 0000000..1d24d3b
--- /dev/null
+++ b/openjdkjvmti/ti_logging.cc
@@ -0,0 +1,71 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "ti_logging.h"
+
+#include "art_jvmti.h"
+
+#include "base/mutex.h"
+#include "thread-current-inl.h"
+
+namespace openjdkjvmti {
+
+jvmtiError LogUtil::GetLastError(jvmtiEnv* env, char** data) {
+  if (env == nullptr || data == nullptr) {
+    return ERR(INVALID_ENVIRONMENT);
+  }
+  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
+  art::MutexLock mu(art::Thread::Current(), tienv->last_error_mutex_);
+  if (tienv->last_error_.empty()) {
+    return ERR(ABSENT_INFORMATION);
+  }
+  char* out;
+  jvmtiError err = tienv->Allocate(tienv->last_error_.size() + 1,
+                                   reinterpret_cast<unsigned char**>(&out));
+  if (err != OK) {
+    return err;
+  }
+  strcpy(out, tienv->last_error_.c_str());
+  *data = out;
+  return OK;
+}
+
+jvmtiError LogUtil::ClearLastError(jvmtiEnv* env) {
+  if (env == nullptr) {
+    return ERR(INVALID_ENVIRONMENT);
+  }
+  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
+  art::MutexLock mu(art::Thread::Current(), tienv->last_error_mutex_);
+  tienv->last_error_.clear();
+  return OK;
+}
+
+}  // namespace openjdkjvmti
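
Because GetLastError copies the message through tienv->Allocate, ownership of
the buffer transfers to the caller, which must release it with Deallocate. A
sketch of the expected calling convention, reusing the hypothetical
GetLastErrorMessage helper from the lookup sketch above:

    #include <cstdio>
    #include <jvmti.h>

    void DumpLastJvmtiError(jvmtiEnv* jvmti) {
      char* msg = nullptr;
      jvmtiError err = GetLastErrorMessage(jvmti, &msg);
      if (err == JVMTI_ERROR_NONE) {
        std::fprintf(stderr, "last jvmti error: %s\n", msg);
        // The buffer came from jvmtiEnv::Allocate, so the caller frees it.
        jvmti->Deallocate(reinterpret_cast<unsigned char*>(msg));
      } else if (err == JVMTI_ERROR_ABSENT_INFORMATION) {
        // Nothing (tracked) has been recorded since the last clear.
      }
    }
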
diff --git a/openjdkjvmti/ti_logging.h b/openjdkjvmti/ti_logging.h
new file mode 100644
index 0000000..31b51bb
--- /dev/null
+++ b/openjdkjvmti/ti_logging.h
@@ -0,0 +1,102 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_OPENJDKJVMTI_TI_LOGGING_H_
+#define ART_OPENJDKJVMTI_TI_LOGGING_H_
+
+#include "art_jvmti.h"
+
+#include <ostream>
+#include <sstream>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "thread-current-inl.h"
+
+namespace openjdkjvmti {
+
+// NB Uses implementation details of android-base/logging.h.
+#define JVMTI_LOG(severity, env)                             \
+  ::openjdkjvmti::JvmtiLogMessage((env),                     \
+                                  __FILE__,                  \
+                                  __LINE__,                  \
+                                  ::android::base::DEFAULT,  \
+                                  SEVERITY_LAMBDA(severity), \
+                                  _LOG_TAG_INTERNAL,         \
+                                  -1)
+
+class JvmtiLogMessage {
+ public:
+  JvmtiLogMessage(jvmtiEnv* env,
+                  const char* file,
+                  unsigned int line,
+                  android::base::LogId id,
+                  android::base::LogSeverity severity,
+                  const char* tag,
+                  int error)
+      : env_(ArtJvmTiEnv::AsArtJvmTiEnv(env)),
+        real_log_(file, line, id, severity, tag, error),
+        real_log_stream_(real_log_.stream()) {
+    DCHECK(env_ != nullptr);
+  }
+
+  ~JvmtiLogMessage() {
+    art::MutexLock mu(art::Thread::Current(), env_->last_error_mutex_);
+    env_->last_error_ = save_stream_.str();
+  }
+
+  template<typename T>
+  JvmtiLogMessage& operator<<(T t) {
+    (real_log_stream_ << t);
+    (save_stream_ << t);
+    return *this;
+  }
+
+ private:
+  ArtJvmTiEnv* env_;
+  android::base::LogMessage real_log_;
+  // Lifetime of real_log_stream_ is lifetime of real_log_.
+  std::ostream& real_log_stream_;
+  std::ostringstream save_stream_;
+
+  DISALLOW_COPY_AND_ASSIGN(JvmtiLogMessage);
+};
+
+class LogUtil {
+ public:
+  static jvmtiError ClearLastError(jvmtiEnv* env);
+  static jvmtiError GetLastError(jvmtiEnv* env, char** data);
+};
+
+}  // namespace openjdkjvmti
+
+#endif  // ART_OPENJDKJVMTI_TI_LOGGING_H_
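
JvmtiLogMessage is a small "tee": each operator<< goes both to the real
android-base log stream and to a private buffer, and the destructor publishes
the buffer as last_error_ under the bottom-level mutex. The same shape in
isolation, stripped of the logging internals (a sketch, not ART code):

    #include <ostream>
    #include <sstream>
    #include <string>

    class TeeLog {
     public:
      TeeLog(std::ostream& real, std::string* last_message)
          : real_(real), last_message_(last_message) {}
      // Publish the accumulated copy when the temporary dies, mirroring how
      // ~JvmtiLogMessage stores into env_->last_error_.
      ~TeeLog() { *last_message_ = saved_.str(); }

      template <typename T>
      TeeLog& operator<<(const T& value) {
        real_ << value;   // normal log output
        saved_ << value;  // retained copy
        return *this;
      }

     private:
      std::ostream& real_;
      std::string* last_message_;
      std::ostringstream saved_;
    };

    // Usage: TeeLog(std::cerr, &last_error) << "Could not open " << path;
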
diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc
index 427869e..2187825 100644
--- a/openjdkjvmti/ti_search.cc
+++ b/openjdkjvmti/ti_search.cc
@@ -52,6 +52,7 @@
 #include "scoped_thread_state_change-inl.h"
 #include "thread-current-inl.h"
 #include "thread_list.h"
+#include "ti_logging.h"
 #include "ti_phase.h"
 #include "well_known_classes.h"
 
@@ -213,7 +214,7 @@
   runtime->GetRuntimeCallbacks()->RemoveRuntimePhaseCallback(&gSearchCallback);
 }
 
-jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env ATTRIBUTE_UNUSED,
+jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env,
                                                        const char* segment) {
   art::Runtime* current = art::Runtime::Current();
   if (current == nullptr) {
@@ -235,7 +236,8 @@
                             /* verify_checksum= */ true,
                             &error_msg,
                             &dex_files)) {
-    LOG(WARNING) << "Could not open " << segment << " for boot classpath extension: " << error_msg;
+    JVMTI_LOG(WARNING, env) << "Could not open " << segment << " for boot classpath extension: "
+                            << error_msg;
     return ERR(ILLEGAL_ARGUMENT);
   }
 
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 1279f3b..5de4a81 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -57,6 +57,7 @@
 #include "nativehelper/scoped_local_ref.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack.h"
+#include "ti_logging.h"
 #include "ti_thread.h"
 #include "thread-current-inl.h"
 #include "thread_list.h"
@@ -1097,7 +1098,7 @@
   } while (true);
 }
 
-jvmtiError StackUtil::PopFrame(jvmtiEnv* env ATTRIBUTE_UNUSED, jthread thread) {
+jvmtiError StackUtil::PopFrame(jvmtiEnv* env, jthread thread) {
   art::Thread* self = art::Thread::Current();
   art::Thread* target;
   do {
@@ -1131,9 +1132,10 @@
         tls_data->disable_pop_frame_depth != JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
         tls_data->disable_pop_frame_depth == art::StackVisitor::ComputeNumFrames(target,
                                                                                  kWalkKind)) {
-      LOG(WARNING) << "Disallowing frame pop due to in-progress class-load/prepare. Frame at depth "
-                   << tls_data->disable_pop_frame_depth << " was marked as un-poppable by the "
-                   << "jvmti plugin. See b/117615146 for more information.";
+      JVMTI_LOG(WARNING, env) << "Disallowing frame pop due to in-progress class-load/prepare. "
+                              << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
+                              << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
+                              << "more information.";
       return ERR(OPAQUE_FRAME);
     }
     // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we are
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 31dfbc0..e9d3290 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -1192,7 +1192,7 @@
 
   // Run profman and pass the dex file with --apk-fd.
   android::base::unique_fd apk_fd(
-      open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));
+      open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));  // NOLINT
   ASSERT_GE(apk_fd.get(), 0);
 
   std::string profman_cmd = GetProfmanCmd();
@@ -1270,7 +1270,7 @@
 
   // Run profman and pass the dex file with --apk-fd.
   android::base::unique_fd apk_fd(
-      open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));
+      open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));  // NOLINT
   ASSERT_GE(apk_fd.get(), 0);
 
   std::string profman_cmd = GetProfmanCmd();
diff --git a/profman/profman.cc b/profman/profman.cc
index d989c8c..734cdf4 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -477,7 +477,7 @@
 
   std::unique_ptr<const ProfileCompilationInfo> LoadProfile(const std::string& filename, int fd) {
     if (!filename.empty()) {
-      fd = open(filename.c_str(), O_RDWR);
+      fd = open(filename.c_str(), O_RDWR | O_CLOEXEC);
       if (fd < 0) {
         LOG(ERROR) << "Cannot open " << filename << strerror(errno);
         return nullptr;
@@ -641,7 +641,7 @@
   bool GetClassNamesAndMethods(const std::string& profile_file,
                                std::vector<std::unique_ptr<const DexFile>>* dex_files,
                                std::set<std::string>* out_lines) {
-    int fd = open(profile_file.c_str(), O_RDONLY);
+    int fd = open(profile_file.c_str(), O_RDONLY | O_CLOEXEC);
     if (!FdIsValid(fd)) {
       LOG(ERROR) << "Cannot open " << profile_file << strerror(errno);
       return false;
@@ -1022,7 +1022,7 @@
     int fd = reference_profile_file_fd_;
     if (!FdIsValid(fd)) {
       CHECK(!reference_profile_file_.empty());
-      fd = open(reference_profile_file_.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
+      fd = open(reference_profile_file_.c_str(), O_CREAT | O_TRUNC | O_WRONLY | O_CLOEXEC, 0644);
       if (fd < 0) {
         LOG(ERROR) << "Cannot open " << reference_profile_file_ << strerror(errno);
         return kInvalidFd;
@@ -1155,7 +1155,9 @@
       }
     }
     // ShouldGenerateTestProfile confirms !test_profile_.empty().
-    int profile_test_fd = open(test_profile_.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
+    int profile_test_fd = open(test_profile_.c_str(),
+                               O_CREAT | O_TRUNC | O_WRONLY | O_CLOEXEC,
+                               0644);
     if (profile_test_fd < 0) {
       LOG(ERROR) << "Cannot open " << test_profile_ << strerror(errno);
       return -1;
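
The profman conversions above match the android-cloexec-open check enabled in
this change's clang-tidy configuration: a descriptor opened without O_CLOEXEC
stays open across fork()+exec*() and leaks into whatever child process gets
spawned. Setting the flag at open() time is also race-free, unlike a separate
fcntl() call; a minimal illustration:

    #include <fcntl.h>

    // With O_CLOEXEC the kernel closes the fd automatically on exec*(). The
    // two-step alternative, fcntl(fd, F_SETFD, FD_CLOEXEC), leaves a window
    // in which a concurrent fork() can still inherit the descriptor.
    int OpenProfileForWrite(const char* path) {
      return open(path, O_CREAT | O_TRUNC | O_WRONLY | O_CLOEXEC, 0644);
    }
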
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 2c5465e..c1a03ab 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -174,7 +174,7 @@
 
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
-  UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
   qpoints->pReadBarrierMarkReg12 = nullptr;  // Cannot use register 12 (IP) to pass arguments.
   qpoints->pReadBarrierMarkReg13 = nullptr;  // Cannot use register 13 (SP) to pass arguments.
   qpoints->pReadBarrierMarkReg14 = nullptr;  // Cannot use register 14 (LR) to pass arguments.
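
The /*is_active*/ to /*is_active=*/ rewrites here and below follow clang-tidy's
bugprone-argument-comment convention, enabled elsewhere in this change: with
the trailing '=', the tool verifies the comment against the declared parameter
name, so a rename cannot silently strand a stale comment. A compilable
illustration (the function is a stand-in, not ART code):

    static void UpdateReadBarrier(bool is_active) { (void)is_active; }

    int main() {
      UpdateReadBarrier(/*is_active=*/ false);  // OK: matches the declaration
      // UpdateReadBarrier(/*active=*/ false);  // would be flagged by the check
      return 0;
    }
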
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 4c43b7e..e681d63 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -190,7 +190,7 @@
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
   qpoints->pReadBarrierMarkReg16 = nullptr;  // IP0 is used as a temp by the asm stub.
-  UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
   qpoints->pReadBarrierSlow = artReadBarrierSlow;
   qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
 }
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 05172db..cbf5681 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -184,7 +184,7 @@
   jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
 
   // Alloc
-  ResetQuickAllocEntryPoints(qpoints, /*is_active*/ false);
+  ResetQuickAllocEntryPoints(qpoints, /*is_active=*/ false);
 
   // Cast
   qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
@@ -445,7 +445,7 @@
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
   static_assert(IsDirectEntrypoint(kQuickReadBarrierJni), "Direct C stub not marked direct.");
-  UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
   // Cannot use the following registers to pass arguments:
   // 0(ZERO), 1(AT), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
   // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 2acfe14..741d41a 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -191,7 +191,7 @@
 
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
-  UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
   // Cannot use the following registers to pass arguments:
   // 0(ZERO), 1(AT), 15(T3), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
   // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index e8df90e..de19317 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1899,7 +1899,7 @@
   LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
   ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
   ImtConflictTable* empty_conflict_table =
-      Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc);
+      Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count=*/0u, linear_alloc);
   void* data = linear_alloc->Alloc(
       self,
       ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, kRuntimePointerSize));
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index ffb0c94..3db4ede 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -98,7 +98,7 @@
 
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
-  UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
   qpoints->pReadBarrierMarkReg04 = nullptr;  // Cannot use register 4 (ESP) to pass arguments.
   // x86 has only 8 core registers.
   qpoints->pReadBarrierMarkReg08 = nullptr;
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index 9846251..e9e983c 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -35,27 +35,39 @@
     "atom",
     "sandybridge",
     "silvermont",
+    "kabylake",
 };
 
 static constexpr const char* x86_variants_with_ssse3[] = {
     "atom",
     "sandybridge",
     "silvermont",
+    "kabylake",
 };
 
 static constexpr const char* x86_variants_with_sse4_1[] = {
     "sandybridge",
     "silvermont",
+    "kabylake",
 };
 
 static constexpr const char* x86_variants_with_sse4_2[] = {
     "sandybridge",
     "silvermont",
+    "kabylake",
 };
 
 static constexpr const char* x86_variants_with_popcnt[] = {
     "sandybridge",
     "silvermont",
+    "kabylake",
+};
+static constexpr const char* x86_variants_with_avx[] = {
+    "kabylake",
+};
+
+static constexpr const char* x86_variants_with_avx2[] = {
+    "kabylake",
 };
 
 X86FeaturesUniquePtr X86InstructionSetFeatures::Create(bool x86_64,
@@ -93,9 +105,12 @@
   bool has_SSE4_2 = FindVariantInArray(x86_variants_with_sse4_2,
                                        arraysize(x86_variants_with_sse4_2),
                                        variant);
-  bool has_AVX = false;
-  bool has_AVX2 = false;
-
+  bool has_AVX = FindVariantInArray(x86_variants_with_avx,
+                                    arraysize(x86_variants_with_avx),
+                                    variant);
+  bool has_AVX2 = FindVariantInArray(x86_variants_with_avx2,
+                                     arraysize(x86_variants_with_avx2),
+                                     variant);
   bool has_POPCNT = FindVariantInArray(x86_variants_with_popcnt,
                                        arraysize(x86_variants_with_popcnt),
                                        variant);
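
FindVariantInArray is defined outside this excerpt; judging from its call
sites it is a linear scan of the variant tables above, along these lines (a
sketch under that assumption):

    #include <cstddef>
    #include <string>

    static bool FindVariantInArray(const char* const* variants,
                                   size_t num_variants,
                                   const std::string& variant) {
      for (size_t i = 0; i < num_variants; ++i) {
        if (variant == variants[i]) {
          return true;
        }
      }
      return false;
    }
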
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 6bd6263..34d908b 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -67,6 +67,8 @@
 
   bool HasPopCnt() const { return has_POPCNT_; }
 
+  bool HasAVX2() const { return has_AVX2_; }
+
  protected:
   // Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
   std::unique_ptr<const InstructionSetFeatures>
diff --git a/runtime/arch/x86/instruction_set_features_x86_test.cc b/runtime/arch/x86/instruction_set_features_x86_test.cc
index 33eac0f..cdf15af 100644
--- a/runtime/arch/x86/instruction_set_features_x86_test.cc
+++ b/runtime/arch/x86/instruction_set_features_x86_test.cc
@@ -143,4 +143,40 @@
   EXPECT_FALSE(x86_features->Equals(x86_default_features.get()));
 }
 
+TEST(X86InstructionSetFeaturesTest, X86FeaturesFromKabylakeVariant) {
+  // Build features for a 32-bit kabylake x86 processor.
+  std::string error_msg;
+  std::unique_ptr<const InstructionSetFeatures> x86_features(
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86, "kabylake", &error_msg));
+  ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
+  EXPECT_EQ(x86_features->GetInstructionSet(), InstructionSet::kX86);
+  EXPECT_TRUE(x86_features->Equals(x86_features.get()));
+  EXPECT_STREQ("ssse3,sse4.1,sse4.2,avx,avx2,popcnt",
+               x86_features->GetFeatureString().c_str());
+  EXPECT_EQ(x86_features->AsBitmap(), 63U);
+
+  // Build features for a 32-bit x86 default processor.
+  std::unique_ptr<const InstructionSetFeatures> x86_default_features(
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86, "default", &error_msg));
+  ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
+  EXPECT_EQ(x86_default_features->GetInstructionSet(), InstructionSet::kX86);
+  EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
+  EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
+               x86_default_features->GetFeatureString().c_str());
+  EXPECT_EQ(x86_default_features->AsBitmap(), 0U);
+
+  // Build features for a 64-bit x86-64 kabylake processor.
+  std::unique_ptr<const InstructionSetFeatures> x86_64_features(
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86_64, "kabylake", &error_msg));
+  ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
+  EXPECT_EQ(x86_64_features->GetInstructionSet(), InstructionSet::kX86_64);
+  EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
+  EXPECT_STREQ("ssse3,sse4.1,sse4.2,avx,avx2,popcnt",
+               x86_64_features->GetFeatureString().c_str());
+  EXPECT_EQ(x86_64_features->AsBitmap(), 63U);
+
+  EXPECT_FALSE(x86_64_features->Equals(x86_features.get()));
+  EXPECT_FALSE(x86_64_features->Equals(x86_default_features.get()));
+  EXPECT_FALSE(x86_features->Equals(x86_default_features.get()));
+}
 }  // namespace art
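
The 63U expectations follow from the feature string: kabylake sets all six x86
feature bits, and 0b111111 == 63. The bit assignments below are inferred from
the feature-string order (ssse3, sse4.1, sse4.2, avx, avx2, popcnt) and are an
assumption about the AsBitmap() encoding, not code from this change:

    #include <cstdint>

    enum : uint32_t {
      kSsse3Bitfield  = 1u << 0,
      kSse4_1Bitfield = 1u << 1,
      kSse4_2Bitfield = 1u << 2,
      kAvxBitfield    = 1u << 3,
      kAvx2Bitfield   = 1u << 4,
      kPopCntBitfield = 1u << 5,
    };

    static_assert((kSsse3Bitfield | kSse4_1Bitfield | kSse4_2Bitfield |
                   kAvxBitfield | kAvx2Bitfield | kPopCntBitfield) == 63u,
                  "all six features set == 63");
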
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 6bae69c..db011ba 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -120,7 +120,7 @@
 
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
-  UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false);
+  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
   qpoints->pReadBarrierMarkReg04 = nullptr;  // Cannot use register 4 (RSP) to pass arguments.
   // x86-64 has only 16 core registers.
   qpoints->pReadBarrierMarkReg16 = nullptr;
diff --git a/runtime/art_field.cc b/runtime/art_field.cc
index 6cbd9e4..e20e7f3 100644
--- a/runtime/art_field.cc
+++ b/runtime/art_field.cc
@@ -47,7 +47,7 @@
 ObjPtr<mirror::Class> ArtField::ProxyFindSystemClass(const char* descriptor) {
   DCHECK(GetDeclaringClass()->IsProxyClass());
   ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupClass(
-      Thread::Current(), descriptor, /* class_loader */ nullptr);
+      Thread::Current(), descriptor, /* class_loader= */ nullptr);
   DCHECK(klass != nullptr);
   return klass;
 }
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 68ccfee..4a19b10 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -324,12 +324,12 @@
   if (UNLIKELY(!runtime->IsStarted() || Dbg::IsForcedInterpreterNeededForCalling(self, this))) {
     if (IsStatic()) {
       art::interpreter::EnterInterpreterFromInvoke(
-          self, this, nullptr, args, result, /*stay_in_interpreter*/ true);
+          self, this, nullptr, args, result, /*stay_in_interpreter=*/ true);
     } else {
       mirror::Object* receiver =
           reinterpret_cast<StackReference<mirror::Object>*>(&args[0])->AsMirrorPtr();
       art::interpreter::EnterInterpreterFromInvoke(
-          self, this, receiver, args + 1, result, /*stay_in_interpreter*/ true);
+          self, this, receiver, args + 1, result, /*stay_in_interpreter=*/ true);
     }
   } else {
     DCHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 851c23f..50b42d4 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -58,7 +58,7 @@
   size = RoundUp(size, kPageSize);
   std::string error_msg;
   MemMap map = MemMap::MapAnonymous(name,
-                                    /* addr */ nullptr,
+                                    /* addr= */ nullptr,
                                     size,
                                     PROT_READ | PROT_WRITE,
                                     low_4gb,
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index c11e3d1..9952283 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -1250,9 +1250,9 @@
     #undef UPDATE_CURRENT_LOCK_LEVEL
 
     // List of mutexes that we may hold when accessing a weak ref.
-    AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock*/ false);
-    AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock*/ false);
-    AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock*/ false);
+    AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock=*/ false);
+    AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock=*/ false);
+    AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock=*/ false);
 
     InitConditions();
   }
diff --git a/runtime/cha.cc b/runtime/cha.cc
index b600df6..de4aebe 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -363,7 +363,7 @@
     // non-single-implementation already.
     VerifyNonSingleImplementation(klass->GetSuperClass()->GetSuperClass(),
                                   method_in_super->GetMethodIndex(),
-                                  nullptr /* excluded_method */);
+                                  /* excluded_method= */ nullptr);
     return;
   }
 
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 2536b23..0dc62d3 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -315,7 +315,7 @@
     // Check if the invoke type matches the class type.
     ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
     ObjPtr<mirror::ClassLoader> class_loader = referrer->GetClassLoader();
-    if (CheckInvokeClassMismatch</* kThrow */ false>(dex_cache, type, method_idx, class_loader)) {
+    if (CheckInvokeClassMismatch</* kThrow= */ false>(dex_cache, type, method_idx, class_loader)) {
       return nullptr;
     }
     // Check access.
@@ -366,7 +366,7 @@
     // Check if the invoke type matches the class type.
     ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
     ObjPtr<mirror::ClassLoader> class_loader = referrer->GetClassLoader();
-    if (CheckInvokeClassMismatch</* kThrow */ true>(dex_cache, type, method_idx, class_loader)) {
+    if (CheckInvokeClassMismatch</* kThrow= */ true>(dex_cache, type, method_idx, class_loader)) {
       DCHECK(Thread::Current()->IsExceptionPending());
       return nullptr;
     }
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index bc86841..dab8d58 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -496,7 +496,7 @@
   // Space (LOS) -- see the comment about the dirty card scanning logic in
   // art::gc::collector::ConcurrentCopying::MarkingPhase.
   Handle<mirror::Class> java_lang_String(hs.NewHandle(
-      AllocClass</* kMovable */ false>(
+      AllocClass</* kMovable= */ false>(
           self, java_lang_Class.Get(), mirror::String::ClassSize(image_pointer_size_))));
   java_lang_String->SetStringClass();
   mirror::Class::SetStatus(java_lang_String, ClassStatus::kResolved, self);
@@ -1039,8 +1039,8 @@
     std::vector<std::unique_ptr<const DexFile>> dex_files;
     if (!AddImageSpace(image_space,
                        ScopedNullHandle<mirror::ClassLoader>(),
-                       /*dex_elements*/nullptr,
-                       /*dex_location*/nullptr,
+                       /*dex_elements=*/nullptr,
+                       /*dex_location=*/nullptr,
                        /*out*/&dex_files,
                        error_msg)) {
       return false;
@@ -1127,7 +1127,10 @@
       }
       return true;  // Continue with the next Element.
     };
-    bool error = VisitClassLoaderDexElements(soa, handle, add_element_names, /* error */ false);
+    bool error = VisitClassLoaderDexElements(soa,
+                                             handle,
+                                             add_element_names,
+                                             /* defaultReturn= */ false);
     if (error) {
       // An error occurred during DexPathList Element visiting.
       return false;
@@ -1259,16 +1262,16 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
     if (space.HasAddress(obj)) {
       if (obj->IsDexCache()) {
-        obj->VisitReferences</* kVisitNativeRoots */ true,
-                                                     kVerifyNone,
-                                                     kWithoutReadBarrier>(visitor, visitor);
+        obj->VisitReferences</*kVisitNativeRoots=*/ true,
+                                                    kVerifyNone,
+                                                    kWithoutReadBarrier>(visitor, visitor);
       } else {
         // Don't visit native roots for non-dex-cache as they can't contain
         // native references to strings.  This is verified during compilation
         // by ImageWriter::VerifyNativeGCRootInvariants.
-        obj->VisitReferences</* kVisitNativeRoots */ false,
-                                                     kVerifyNone,
-                                                     kWithoutReadBarrier>(visitor, visitor);
+        obj->VisitReferences</*kVisitNativeRoots=*/ false,
+                                                    kVerifyNone,
+                                                    kWithoutReadBarrier>(visitor, visitor);
       }
     }
   });
@@ -2241,7 +2244,7 @@
   for (const ClassLoaderData& data : class_loaders_) {
     // CHA unloading analysis is not needed. No negative consequences are expected because
     // all the classloaders are deleted at the same time.
-    DeleteClassLoader(self, data, false /*cleanup_cha*/);
+    DeleteClassLoader(self, data, /*cleanup_cha=*/ false);
   }
   class_loaders_.clear();
 }
@@ -2345,7 +2348,7 @@
   // in the `klass_` field of one of its instances allocated in the Large-Object
   // Space (LOS) -- see the comment about the dirty card scanning logic in
   // art::gc::collector::ConcurrentCopying::MarkingPhase.
-  return AllocClass</* kMovable */ false>(
+  return AllocClass</* kMovable= */ false>(
       self, java_lang_Class, mirror::Array::ClassSize(image_pointer_size_));
 }
 
@@ -3442,7 +3445,7 @@
   CHECK(dex_cache != nullptr) << dex_file.GetLocation();
   boot_class_path_.push_back(&dex_file);
   WriterMutexLock mu(Thread::Current(), *Locks::dex_lock_);
-  RegisterDexFileLocked(dex_file, dex_cache, /* class_loader */ nullptr);
+  RegisterDexFileLocked(dex_file, dex_cache, /* class_loader= */ nullptr);
 }
 
 void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
@@ -5013,7 +5016,7 @@
         ArtField* art_field = ResolveField(field.GetIndex(),
                                            dex_cache,
                                            class_loader,
-                                           /* is_static */ true);
+                                           /* is_static= */ true);
         if (Runtime::Current()->IsActiveTransaction()) {
           value_it.ReadValueToField<true>(art_field);
         } else {
@@ -6413,8 +6416,8 @@
                        unimplemented_method,
                        conflict_method,
                        klass,
-                       /*create_conflict_tables*/true,
-                       /*ignore_copied_methods*/false,
+                       /*create_conflict_tables=*/true,
+                       /*ignore_copied_methods=*/false,
                        &new_conflict,
                        &imt_data[0]);
   }
@@ -6902,8 +6905,8 @@
                          unimplemented_method,
                          imt_conflict_method,
                          klass.Get(),
-                         /*create_conflict_table*/false,
-                         /*ignore_copied_methods*/true,
+                         /*create_conflict_tables=*/false,
+                         /*ignore_copied_methods=*/true,
                          /*out*/new_conflict,
                          /*out*/imt);
     }
@@ -8121,7 +8124,7 @@
 
   // Check if the invoke type matches the class type.
   if (kResolveMode == ResolveMode::kCheckICCEAndIAE &&
-      CheckInvokeClassMismatch</* kThrow */ true>(
+      CheckInvokeClassMismatch</* kThrow= */ true>(
           dex_cache.Get(), type, [klass]() { return klass; })) {
     DCHECK(Thread::Current()->IsExceptionPending());
     return nullptr;
@@ -9089,7 +9092,7 @@
   }
   for (ClassLoaderData& data : to_delete) {
     // CHA unloading analysis and SingleImplementaion cleanups are required.
-    DeleteClassLoader(self, data, true /*cleanup_cha*/);
+    DeleteClassLoader(self, data, /*cleanup_cha=*/ true);
   }
 }
 
@@ -9235,11 +9238,11 @@
     InvokeType type);
 
 // Instantiate ClassLinker::AllocClass.
-template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable */ true>(
+template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable= */ true>(
     Thread* self,
     ObjPtr<mirror::Class> java_lang_Class,
     uint32_t class_size);
-template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable */ false>(
+template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable= */ false>(
     Thread* self,
     ObjPtr<mirror::Class> java_lang_Class,
     uint32_t class_size);
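
AllocClass is templated on the kMovable bool and defined in the .cc file, so
both specializations are instantiated explicitly for use from other
translation units. The pattern in miniature (names are stand-ins):

    // alloc.h: only the declaration is visible to clients.
    template <bool kMovable>
    int* AllocThing();

    // alloc.cc: the definition plus explicit instantiations of both values,
    // so callers elsewhere can link against them.
    template <bool kMovable>
    int* AllocThing() {
      return new int(kMovable ? 1 : 2);
    }
    template int* AllocThing</*kMovable=*/ true>();
    template int* AllocThing</*kMovable=*/ false>();
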
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index b6f1f86..a48dfaf 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -1043,12 +1043,12 @@
    public:
     // This slot must become a default conflict method.
     static MethodTranslation CreateConflictingMethod() {
-      return MethodTranslation(Type::kConflict, /*translation*/nullptr);
+      return MethodTranslation(Type::kConflict, /*translation=*/nullptr);
     }
 
     // This slot must become an abstract method.
     static MethodTranslation CreateAbstractMethod() {
-      return MethodTranslation(Type::kAbstract, /*translation*/nullptr);
+      return MethodTranslation(Type::kAbstract, /*translation=*/nullptr);
     }
 
     // Use the given method as the current value for this vtable slot during translation.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index ab7182a..27ac90b 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -1034,8 +1034,8 @@
   // Force initialization to turn the class erroneous.
   bool initialized = class_linker_->EnsureInitialized(soa.Self(),
                                                       klass,
-                                                      /* can_init_fields */ true,
-                                                      /* can_init_parents */ true);
+                                                      /* can_init_fields= */ true,
+                                                      /* can_init_parents= */ true);
   EXPECT_FALSE(initialized);
   EXPECT_TRUE(soa.Self()->IsExceptionPending());
   soa.Self()->ClearException();
@@ -1320,15 +1320,15 @@
   ObjPtr<mirror::Class> uninit = ResolveVerifyAndClinit(type_idx,
                                                         clinit,
                                                         soa.Self(),
-                                                        /* can_run_clinit */ true,
-                                                        /* verify_access */ false);
+                                                        /* can_run_clinit= */ true,
+                                                        /* verify_access= */ false);
   EXPECT_TRUE(uninit != nullptr);
   EXPECT_FALSE(uninit->IsInitialized());
   ObjPtr<mirror::Class> init = ResolveVerifyAndClinit(type_idx,
                                                       getS0,
                                                       soa.Self(),
-                                                      /* can_run_clinit */ true,
-                                                      /* verify_access */ false);
+                                                      /* can_run_clinit= */ true,
+                                                      /* verify_access= */ false);
   EXPECT_TRUE(init != nullptr);
   EXPECT_TRUE(init->IsInitialized());
 }
@@ -1530,7 +1530,7 @@
   {
     WriterMutexLock mu(soa.Self(), *Locks::dex_lock_);
     // Check that inserting with a UTF16 name works.
-    class_linker->RegisterDexFileLocked(*dex_file, dex_cache.Get(), /* class_loader */ nullptr);
+    class_linker->RegisterDexFileLocked(*dex_file, dex_cache.Get(), /* class_loader= */ nullptr);
   }
 }
 
@@ -1699,14 +1699,14 @@
   jobject class_loader_a = LoadDexInPathClassLoader("ForClassLoaderA", nullptr);
   VerifyClassResolution("LDefinedInA;", class_loader_a, class_loader_a);
   VerifyClassResolution("Ljava/lang/String;", class_loader_a, nullptr);
-  VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find*/ false);
+  VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find=*/ false);
 }
 
 TEST_F(ClassLinkerClassLoaderTest, CreateDelegateLastClassLoader) {
   jobject class_loader_a = LoadDexInDelegateLastClassLoader("ForClassLoaderA", nullptr);
   VerifyClassResolution("LDefinedInA;", class_loader_a, class_loader_a);
   VerifyClassResolution("Ljava/lang/String;", class_loader_a, nullptr);
-  VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find*/ false);
+  VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find=*/ false);
 }
 
 TEST_F(ClassLinkerClassLoaderTest, CreateClassLoaderChain) {
@@ -1753,7 +1753,7 @@
   VerifyClassResolution("LDefinedInAC;", class_loader_d, class_loader_a);
 
   // Sanity check that we don't find an undefined class.
-  VerifyClassResolution("LNotDefined;", class_loader_d, nullptr, /*should_find*/ false);
+  VerifyClassResolution("LNotDefined;", class_loader_d, nullptr, /*should_find=*/ false);
 }
 
 }  // namespace art
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index 5c8d685..dd10f3c 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -223,7 +223,7 @@
       if (!dex_file_loader.Open(location.c_str(),
                                 location.c_str(),
                                 Runtime::Current()->IsVerificationEnabled(),
-                                /*verify_checksum*/ true,
+                                /*verify_checksum=*/ true,
                                 &error_msg,
                                 &info.opened_dex_files)) {
         // If we fail to open the dex file because it's been stripped, try to open the dex file
@@ -298,12 +298,12 @@
 }
 
 std::string ClassLoaderContext::EncodeContextForDex2oat(const std::string& base_dir) const {
-  return EncodeContext(base_dir, /*for_dex2oat*/ true, /*stored_context*/ nullptr);
+  return EncodeContext(base_dir, /*for_dex2oat=*/ true, /*stored_context=*/ nullptr);
 }
 
 std::string ClassLoaderContext::EncodeContextForOatFile(const std::string& base_dir,
                                                         ClassLoaderContext* stored_context) const {
-  return EncodeContext(base_dir, /*for_dex2oat*/ false, stored_context);
+  return EncodeContext(base_dir, /*for_dex2oat=*/ false, stored_context);
 }
 
 std::string ClassLoaderContext::EncodeContext(const std::string& base_dir,
@@ -663,7 +663,7 @@
   Handle<mirror::ObjectArray<mirror::Object>> h_dex_elements =
       hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(dex_elements));
 
-  std::unique_ptr<ClassLoaderContext> result(new ClassLoaderContext(/*owns_the_dex_files*/ false));
+  std::unique_ptr<ClassLoaderContext> result(new ClassLoaderContext(/*owns_the_dex_files=*/ false));
   if (result->AddInfoToContextFromClassLoader(soa, h_class_loader, h_dex_elements)) {
     return result;
   } else {
diff --git a/runtime/class_loader_context_test.cc b/runtime/class_loader_context_test.cc
index 5e3f48c..ea624f1 100644
--- a/runtime/class_loader_context_test.cc
+++ b/runtime/class_loader_context_test.cc
@@ -125,7 +125,7 @@
 
   std::unique_ptr<ClassLoaderContext> ParseContextWithChecksums(const std::string& context_spec) {
     std::unique_ptr<ClassLoaderContext> context(new ClassLoaderContext());
-    if (!context->Parse(context_spec, /*parse_checksums*/ true)) {
+    if (!context->Parse(context_spec, /*parse_checksums=*/ true)) {
       return nullptr;
     }
     return context;
@@ -263,7 +263,7 @@
           "PCL[" + multidex_name + ":" + myclass_dex_name + "];" +
           "DLC[" + dex_name + "]");
 
-  ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir*/ ""));
+  ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir=*/ ""));
 
   VerifyContextSize(context.get(), 2);
 
@@ -314,7 +314,7 @@
           "PCL[" + multidex_name + ":" + myclass_dex_name + "];" +
           "DLC[" + dex_name + "]");
 
-  ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir*/ ""));
+  ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir=*/ ""));
 
   std::vector<std::unique_ptr<const DexFile>> all_dex_files0 = OpenTestDexFiles("MultiDex");
   std::vector<std::unique_ptr<const DexFile>> myclass_dex_files = OpenTestDexFiles("MyClass");
diff --git a/runtime/class_loader_utils.h b/runtime/class_loader_utils.h
index 78ad568..945d659 100644
--- a/runtime/class_loader_utils.h
+++ b/runtime/class_loader_utils.h
@@ -160,7 +160,7 @@
   VisitClassLoaderDexFiles<decltype(helper), void*>(soa,
                                                     class_loader,
                                                     helper,
-                                                    /* default */ nullptr);
+                                                    /* default= */ nullptr);
 }
 
 }  // namespace art
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 1460562..774f19e 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -78,8 +78,8 @@
     const ArtDexFileLoader dex_file_loader;
     CHECK(dex_file_loader.Open(input_jar.c_str(),
                                input_jar.c_str(),
-                               /*verify*/ true,
-                               /*verify_checksum*/ true,
+                               /*verify=*/ true,
+                               /*verify_checksum=*/ true,
                                &error_msg,
                                &dex_files)) << error_msg;
     EXPECT_EQ(dex_files.size(), 1u) << "Only one input dex is supported";
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 7199d5e..7a08cb3 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -105,10 +105,10 @@
 }
 
 void ThrowAbstractMethodError(uint32_t method_idx, const DexFile& dex_file) {
-  ThrowException("Ljava/lang/AbstractMethodError;", /* referrer */ nullptr,
+  ThrowException("Ljava/lang/AbstractMethodError;", /* referrer= */ nullptr,
                  StringPrintf("abstract method \"%s\"",
                               dex_file.PrettyMethod(method_idx,
-                                                    /* with_signature */ true).c_str()).c_str());
+                                                    /* with_signature= */ true).c_str()).c_str());
 }
 
 // ArithmeticException
@@ -324,7 +324,7 @@
 void ThrowIncompatibleClassChangeErrorForMethodConflict(ArtMethod* method) {
   DCHECK(method != nullptr);
   ThrowException("Ljava/lang/IncompatibleClassChangeError;",
-                 /*referrer*/nullptr,
+                 /*referrer=*/nullptr,
                  StringPrintf("Conflicting default method implementations %s",
                               ArtMethod::PrettyMethod(method).c_str()).c_str());
 }
@@ -633,7 +633,7 @@
       ArtField* field =
           Runtime::Current()->GetClassLinker()->ResolveField(instr.VRegC_22c(), method, false);
       Thread::Current()->ClearException();  // Resolution may fail, ignore.
-      ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
+      ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ true);
       break;
     }
     case Instruction::IGET_QUICK:
@@ -647,9 +647,9 @@
       ArtField* field = nullptr;
       CHECK_NE(field_idx, DexFile::kDexNoIndex16);
       field = Runtime::Current()->GetClassLinker()->ResolveField(
-          field_idx, method, /* is_static */ false);
+          field_idx, method, /* is_static= */ false);
       Thread::Current()->ClearException();  // Resolution may fail, ignore.
-      ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
+      ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ true);
       break;
     }
     case Instruction::IPUT:
@@ -660,9 +660,9 @@
     case Instruction::IPUT_CHAR:
     case Instruction::IPUT_SHORT: {
       ArtField* field = Runtime::Current()->GetClassLinker()->ResolveField(
-          instr.VRegC_22c(), method, /* is_static */ false);
+          instr.VRegC_22c(), method, /* is_static= */ false);
       Thread::Current()->ClearException();  // Resolution may fail, ignore.
-      ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
+      ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ false);
       break;
     }
     case Instruction::IPUT_QUICK:
@@ -676,9 +676,9 @@
       ArtField* field = nullptr;
       CHECK_NE(field_idx, DexFile::kDexNoIndex16);
       field = Runtime::Current()->GetClassLinker()->ResolveField(
-          field_idx, method, /* is_static */ false);
+          field_idx, method, /* is_static= */ false);
       Thread::Current()->ClearException();  // Resolution may fail, ignore.
-      ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
+      ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ false);
       break;
     }
     case Instruction::AGET:
diff --git a/runtime/debug_print.cc b/runtime/debug_print.cc
index cb334b5..2939b00 100644
--- a/runtime/debug_print.cc
+++ b/runtime/debug_print.cc
@@ -37,7 +37,7 @@
   std::ostringstream oss;
   gc::Heap* heap = Runtime::Current()->GetHeap();
   gc::space::ContinuousSpace* cs =
-      heap->FindContinuousSpaceFromObject(klass, /* fail_ok */ true);
+      heap->FindContinuousSpaceFromObject(klass, /* fail_ok= */ true);
   if (cs != nullptr) {
     if (cs->IsImageSpace()) {
       gc::space::ImageSpace* ispace = cs->AsImageSpace();
@@ -50,7 +50,7 @@
     }
   } else {
     gc::space::DiscontinuousSpace* ds =
-        heap->FindDiscontinuousSpaceFromObject(klass, /* fail_ok */ true);
+        heap->FindDiscontinuousSpaceFromObject(klass, /* fail_ok= */ true);
     if (ds != nullptr) {
       oss << "discontinuous;" << ds->GetName();
     } else {
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b679cbe..099cadc 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -65,6 +65,7 @@
 #include "oat_file.h"
 #include "obj_ptr-inl.h"
 #include "reflection.h"
+#include "runtime-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack.h"
 #include "thread_list.h"
@@ -688,7 +689,7 @@
     runtime->GetInstrumentation()->EnableDeoptimization();
   }
   instrumentation_events_ = 0;
-  gDebuggerActive = true;
+  Runtime::DoAndMaybeSwitchInterpreter([=](){ gDebuggerActive = true; });
   Runtime::Current()->GetRuntimeCallbacks()->AddMethodInspectionCallback(&gDebugActiveCallback);
   LOG(INFO) << "Debugger is active";
 }
@@ -726,7 +727,7 @@
       if (RequiresDeoptimization()) {
         runtime->GetInstrumentation()->DisableDeoptimization(kDbgInstrumentationKey);
       }
-      gDebuggerActive = false;
+      Runtime::DoAndMaybeSwitchInterpreter([=](){ gDebuggerActive = false; });
       Runtime::Current()->GetRuntimeCallbacks()->RemoveMethodInspectionCallback(
           &gDebugActiveCallback);
     }
@@ -943,7 +944,7 @@
 JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                        std::vector<uint64_t>* counts) {
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
+  heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
   VariableSizedHandleScope hs(Thread::Current());
   std::vector<Handle<mirror::Class>> classes;
   counts->clear();
@@ -964,7 +965,7 @@
                                   std::vector<JDWP::ObjectId>* instances) {
   gc::Heap* heap = Runtime::Current()->GetHeap();
   // We only want reachable instances, so do a GC.
-  heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
+  heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
   JDWP::JdwpError error;
   ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
   if (c == nullptr) {
@@ -974,7 +975,7 @@
   std::vector<Handle<mirror::Object>> raw_instances;
   Runtime::Current()->GetHeap()->GetInstances(hs,
                                               hs.NewHandle(c),
-                                              /* use_is_assignable_from */ false,
+                                              /* use_is_assignable_from= */ false,
                                               max_count,
                                               raw_instances);
   for (size_t i = 0; i < raw_instances.size(); ++i) {
@@ -986,7 +987,7 @@
 JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                          std::vector<JDWP::ObjectId>* referring_objects) {
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
+  heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
   JDWP::JdwpError error;
   ObjPtr<mirror::Object> o = gRegistry->Get<mirror::Object*>(object_id, &error);
   if (o == nullptr) {
@@ -3074,7 +3075,7 @@
   Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
   std::unique_ptr<Context> context(Context::Create());
   CatchLocationFinder clf(self, h_exception, context.get());
-  clf.WalkStack(/* include_transitions */ false);
+  clf.WalkStack(/* include_transitions= */ false);
   JDWP::EventLocation exception_throw_location;
   SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc());
   JDWP::EventLocation exception_catch_location;
@@ -3733,7 +3734,7 @@
           bool timed_out;
           ThreadList* const thread_list = Runtime::Current()->GetThreadList();
           suspended_thread = thread_list->SuspendThreadByPeer(thread_peer,
-                                                              /* request_suspension */ true,
+                                                              /* request_suspension= */ true,
                                                               SuspendReason::kForDebugger,
                                                               &timed_out);
         }
@@ -4744,7 +4745,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (ProcessRecord(start, used_bytes)) {
       uint8_t state = ExamineNativeObject(start);
-      AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
+      AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ true);
       startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
     }
   }
@@ -4756,7 +4757,7 @@
       // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
       // If it's the same, we should combine them.
       uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
-      AppendChunk(state, start, used_bytes + chunk_overhead_, false /*is_native*/);
+      AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ false);
       startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
     }
   }
diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc
index b50a430..fb63c82 100644
--- a/runtime/dex/dex_file_annotations.cc
+++ b/runtime/dex/dex_file_annotations.cc
@@ -1251,7 +1251,7 @@
     // WellKnownClasses may not be initialized yet, so `klass` may be null.
     if (klass != nullptr) {
       // Lookup using the boot class path loader should yield the annotation class.
-      CHECK_EQ(klass, linker->LookupClass(soa.Self(), descriptor, /* class_loader */ nullptr));
+      CHECK_EQ(klass, linker->LookupClass(soa.Self(), descriptor, /* class_loader= */ nullptr));
     }
   }
 }
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 2cbf557..fbcee39 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -87,7 +87,7 @@
     std::vector<std::unique_ptr<const DexFile>> multi1;
     ASSERT_TRUE(dex_file_loader.Open(GetMultiDexSrc1().c_str(),
                                      GetMultiDexSrc1().c_str(),
-                                     /* verify */ true,
+                                     /* verify= */ true,
                                      kVerifyChecksum,
                                      &error_msg,
                                      &multi1)) << error_msg;
@@ -96,7 +96,7 @@
     std::vector<std::unique_ptr<const DexFile>> multi2;
     ASSERT_TRUE(dex_file_loader.Open(GetMultiDexSrc2().c_str(),
                                      GetMultiDexSrc2().c_str(),
-                                     /* verify */ true,
+                                     /* verify= */ true,
                                      kVerifyChecksum,
                                      &error_msg,
                                      &multi2)) << error_msg;
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index 429ecd3..13f5fcb2 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -206,7 +206,7 @@
                                                       reinterpret_cast<uint8_t*>(start),
                                                       end - start,
                                                       PROT_NONE,
-                                                      /* low_4gb*/ false,
+                                                      /* low_4gb=*/ false,
                                                       &error_msg));
     ASSERT_TRUE(image_reservation_.back().IsValid()) << error_msg;
     LOG(INFO) << "Reserved space for image " <<
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index ce742fe..4e5fe5f 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -86,7 +86,7 @@
                                                    bool low_4gb,
                                                    std::string* error_msg) {
   std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(
-      new ElfFileImpl<ElfTypes>(file, (prot & PROT_WRITE) != 0, /* program_header_only */ false));
+      new ElfFileImpl<ElfTypes>(file, (prot & PROT_WRITE) != 0, /* program_header_only= */ false));
   if (!elf_file->Setup(file, prot, flags, low_4gb, error_msg)) {
     return nullptr;
   }
@@ -1163,7 +1163,7 @@
           vaddr_size,
           PROT_NONE,
           low_4gb,
-          /* reuse */ false,
+          /* reuse= */ false,
           reservation,
           error_msg);
       if (!local_reservation.IsValid()) {
@@ -1237,10 +1237,10 @@
                                    flags,
                                    file->Fd(),
                                    program_header->p_offset,
-                                   /* low4_gb */ false,
+                                   /* low_4gb= */ false,
                                    file->GetPath().c_str(),
-                                   /* reuse */ true,  // implies MAP_FIXED
-                                   /* reservation */ nullptr,
+                                   /* reuse= */ true,  // implies MAP_FIXED
+                                   /* reservation= */ nullptr,
                                    error_msg);
       if (!segment.IsValid()) {
         *error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
@@ -1262,9 +1262,9 @@
                                             p_vaddr + program_header->p_filesz,
                                             program_header->p_memsz - program_header->p_filesz,
                                             prot,
-                                            /* low_4gb */ false,
-                                            /* reuse */ true,
-                                            /* reservation */ nullptr,
+                                            /* low_4gb= */ false,
+                                            /* reuse= */ true,
+                                            /* reservation= */ nullptr,
                                             error_msg);
       if (!segment.IsValid()) {
         *error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s",
@@ -1763,7 +1763,7 @@
                                PROT_READ,
                                MAP_PRIVATE,
                                file->Fd(),
-                               /* start */ 0,
+                               /* start= */ 0,
                                low_4gb,
                                file->GetPath().c_str(),
                                error_msg);
@@ -1886,7 +1886,7 @@
 }
 
 bool ElfFile::Strip(File* file, std::string* error_msg) {
-  std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, /*low_4gb*/false, error_msg));
+  std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, /*low_4gb=*/false, error_msg));
   if (elf_file.get() == nullptr) {
     return false;
   }
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 35bfa91..120a0e9 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -191,7 +191,7 @@
       return nullptr;
     }
     // CheckObjectAlloc can cause thread suspension which means we may now be instrumented.
-    return klass->Alloc</*kInstrumented*/true>(
+    return klass->Alloc</*kInstrumented=*/true>(
         self,
         Runtime::Current()->GetHeap()->GetCurrentAllocator()).Ptr();
   }
@@ -216,7 +216,7 @@
     // Pass in false since the object cannot be finalizable.
     // CheckClassInitializedForObjectAlloc can cause thread suspension which means we may now be
     // instrumented.
-    return klass->Alloc</*kInstrumented*/true, false>(self, heap->GetCurrentAllocator()).Ptr();
+    return klass->Alloc</*kInstrumented=*/true, false>(self, heap->GetCurrentAllocator()).Ptr();
   }
   // Pass in false since the object cannot be finalizable.
   return klass->Alloc<kInstrumented, false>(self, allocator_type).Ptr();
@@ -287,11 +287,11 @@
     }
     gc::Heap* heap = Runtime::Current()->GetHeap();
     // CheckArrayAlloc can cause thread suspension which means we may now be instrumented.
-    return mirror::Array::Alloc</*kInstrumented*/true>(self,
-                                                       klass,
-                                                       component_count,
-                                                       klass->GetComponentSizeShift(),
-                                                       heap->GetCurrentAllocator());
+    return mirror::Array::Alloc</*kInstrumented=*/true>(self,
+                                                        klass,
+                                                        component_count,
+                                                        klass->GetComponentSizeShift(),
+                                                        heap->GetCurrentAllocator());
   }
   return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
                                              klass->GetComponentSizeShift(), allocator_type);
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 5421f69..12136bf 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -244,7 +244,7 @@
   result.outer_method = outer_caller_and_pc.first;
   uintptr_t caller_pc = outer_caller_and_pc.second;
   result.caller =
-      DoGetCalleeSaveMethodCaller(result.outer_method, caller_pc, /* do_caller_check */ true);
+      DoGetCalleeSaveMethodCaller(result.outer_method, caller_pc, /* do_caller_check= */ true);
   return result;
 }
 
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index 8e784c1..ce12fde 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -31,7 +31,7 @@
   jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
 
   // Alloc
-  ResetQuickAllocEntryPoints(qpoints, /* is_marking */ true);
+  ResetQuickAllocEntryPoints(qpoints, /* is_marking= */ true);
 
   // Resolution and initialization
   qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index c782c9c..2431bce 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -74,9 +74,9 @@
   JValue return_value;
   return_value.SetJ(0);  // we never deoptimize from compiled code with an invoke result.
   self->PushDeoptimizationContext(return_value,
-                                  false /* is_reference */,
+                                  /* is_reference= */ false,
                                   self->GetException(),
-                                  true /* from_code */,
+                                  /* from_code= */ true,
                                   DeoptimizationMethodType::kDefault);
   artDeoptimizeImpl(self, kind, true);
 }
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index c4d85a3..e939982 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -140,7 +140,7 @@
   StackHandleScope<1> hs(self);
   Handle<mirror::Class> h_klass = hs.NewHandle(klass);
   bool success = class_linker->EnsureInitialized(
-      self, h_klass, /* can_init_fields */ true, /* can_init_parents */ true);
+      self, h_klass, /* can_init_fields= */ true, /* can_init_parents= */ true);
   if (UNLIKELY(!success)) {
     return nullptr;
   }
@@ -157,8 +157,8 @@
   ObjPtr<mirror::Class> result = ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
                                                         caller,
                                                         self,
-                                                        /* can_run_clinit */ false,
-                                                        /* verify_access */ false);
+                                                        /* can_run_clinit= */ false,
+                                                        /* verify_access= */ false);
   if (LIKELY(result != nullptr) && CanReferenceBss(caller_and_outer.outer_method, caller)) {
     StoreTypeInBss(caller_and_outer.outer_method, dex::TypeIndex(type_idx), result);
   }
@@ -175,8 +175,8 @@
   ObjPtr<mirror::Class> result = ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
                                                         caller,
                                                         self,
-                                                        /* can_run_clinit */ false,
-                                                        /* verify_access */ true);
+                                                        /* can_run_clinit= */ false,
+                                                        /* verify_access= */ true);
   // Do not StoreTypeInBss(); access check entrypoint is never used together with .bss.
   return result.Ptr();
 }
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index d38e3ed..56232c5 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -392,7 +392,7 @@
   constexpr ReadBarrierOption kReadBarrierOption =
       kUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
   mirror::Object* result =
-      ReadBarrier::Barrier<mirror::Object, /* kIsVolatile */ false, kReadBarrierOption>(
+      ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kReadBarrierOption>(
         obj,
         MemberOffset(offset),
         ref_addr);
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index ba7fb6b..2e447ec 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -67,7 +67,7 @@
   ScopedQuickEntrypointChecks sqec(self);
   // We come from an explicit check in the generated code. This path is triggered
   // only if the object is indeed null.
-  ThrowNullPointerExceptionFromDexPC(/* check_address */ false, 0U);
+  ThrowNullPointerExceptionFromDexPC(/* check_address= */ false, 0U);
   self->QuickDeliverException();
 }
 
@@ -75,7 +75,7 @@
 extern "C" NO_RETURN void artThrowNullPointerExceptionFromSignal(uintptr_t addr, Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
-  ThrowNullPointerExceptionFromDexPC(/* check_address */ true, addr);
+  ThrowNullPointerExceptionFromDexPC(/* check_address= */ true, addr);
   self->QuickDeliverException();
 }
 
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 84631c3..1472490 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -763,7 +763,7 @@
     uint16_t num_regs = accessor.RegistersSize();
     // No last shadow coming from quick.
     ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
-        CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0);
+        CREATE_SHADOW_FRAME(num_regs, /* link= */ nullptr, method, /* dex_pc= */ 0);
     ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
     size_t first_arg_reg = accessor.RegistersSize() - accessor.InsSize();
     BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
@@ -820,7 +820,7 @@
           result,
           shorty[0] == 'L' || shorty[0] == '[',  /* class or array */
           force_frame_pop ? nullptr : self->GetException(),
-          false /* from_code */,
+          /* from_code= */ false,
           DeoptimizationMethodType::kDefault);
 
       // Set special exception to cause deoptimization.
@@ -912,7 +912,7 @@
   uint32_t shorty_len = 0;
   const char* shorty = non_proxy_method->GetShorty(&shorty_len);
   BuildQuickArgumentVisitor local_ref_visitor(
-      sp, /* is_static */ false, shorty, shorty_len, &soa, &args);
+      sp, /* is_static= */ false, shorty, shorty_len, &soa, &args);
 
   local_ref_visitor.VisitArguments();
   DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod();
@@ -975,7 +975,7 @@
                                      const char* shorty,
                                      uint32_t shorty_len,
                                      size_t arg_pos)
-      : QuickArgumentVisitor(sp, /* is_static */ false, shorty, shorty_len),
+      : QuickArgumentVisitor(sp, /* is_static= */ false, shorty, shorty_len),
         cur_pos_(0u),
         arg_pos_(arg_pos),
         ref_arg_(nullptr) {
@@ -1061,7 +1061,7 @@
       << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod();
   uint32_t shorty_len = 0;
   const char* shorty = non_proxy_method->GetShorty(&shorty_len);
-  GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /* is_static */ false, shorty, shorty_len);
+  GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /*is_static=*/ false, shorty, shorty_len);
   ref_args_visitor.VisitArguments();
   std::vector<StackReference<mirror::Object>*> ref_args = ref_args_visitor.GetReferenceArguments();
   return ref_args;
@@ -2709,7 +2709,7 @@
       conflict_method,
       interface_method,
       method,
-      /*force_new_conflict_method*/false);
+      /*force_new_conflict_method=*/false);
   if (new_conflict_method != conflict_method) {
     // Update the IMT if we create a new conflict method. No fence needed here, as the
     // data is consistent.
@@ -2784,7 +2784,7 @@
   const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc();
   const size_t first_arg = 0;
   ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
-      CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, resolved_method, dex_pc);
+      CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, resolved_method, dex_pc);
   ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
   ScopedStackedShadowFramePusher
       frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
@@ -2877,7 +2877,7 @@
   const size_t first_arg = 0;
   const size_t num_vregs = ArtMethod::NumArgRegisters(shorty);
   ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
-      CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, caller_method, dex_pc);
+      CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, caller_method, dex_pc);
   ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
   ScopedStackedShadowFramePusher
       frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 10af10d..313b2b4 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -253,10 +253,10 @@
   void Init() {
     std::string error_msg;
     mem_map_ = MemMap::MapAnonymous(name_.c_str(),
-                                    /* addr */ nullptr,
+                                    /* addr= */ nullptr,
                                     capacity_ * sizeof(begin_[0]),
                                     PROT_READ | PROT_WRITE,
-                                    /* low_4gb */ false,
+                                    /* low_4gb= */ false,
                                     &error_msg);
     CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
     uint8_t* addr = mem_map_.Begin();
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index bb2beaa..80c4c76 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -49,10 +49,10 @@
       RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
-                                        /* addr */ nullptr,
+                                        /* addr= */ nullptr,
                                         bitmap_size,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ false,
+                                        /* low_4gb= */ false,
                                         &error_msg);
   if (UNLIKELY(!mem_map.IsValid())) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 7cddec6..9a5bde8 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -65,10 +65,10 @@
   /* Allocate an extra 256 bytes to allow fixed low-byte of base */
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous("card table",
-                                        /* addr */ nullptr,
+                                        /* addr= */ nullptr,
                                         capacity + 256,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ false,
+                                        /* low_4gb= */ false,
                                         &error_msg);
   CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
   // All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 40dc6e1..b4026fc 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -462,7 +462,7 @@
     for (mirror::HeapReference<mirror::Object>* obj_ptr : references) {
       if (obj_ptr->AsMirrorPtr() != nullptr) {
         all_null = false;
-        visitor->MarkHeapReference(obj_ptr, /*do_atomic_update*/ false);
+        visitor->MarkHeapReference(obj_ptr, /*do_atomic_update=*/ false);
       }
     }
     count += references.size();
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 8bdf6da..b369a66 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -40,10 +40,10 @@
               static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
     std::string error_msg;
     mem_map_ = MemMap::MapAnonymous("read barrier table",
-                                    /* addr */ nullptr,
+                                    /* addr= */ nullptr,
                                     capacity,
                                     PROT_READ | PROT_WRITE,
-                                    /* low_4gb */ false,
+                                    /* low_4gb= */ false,
                                     &error_msg);
     CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
         << "couldn't allocate read barrier table: " << error_msg;
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 9dea2f8..fba62c3 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -75,7 +75,7 @@
     mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
     if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
       *contains_reference_to_target_space_ = true;
-      collector_->MarkHeapReference(ref_ptr, /*do_atomic_update*/ false);
+      collector_->MarkHeapReference(ref_ptr, /*do_atomic_update=*/ false);
       DCHECK(!target_space_->HasAddress(ref_ptr->AsMirrorPtr()));
     }
   }
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 2946486..76d5d9d 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -85,10 +85,10 @@
   const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
-                                        /* addr */ nullptr,
+                                        /* addr= */ nullptr,
                                         bitmap_size,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ false,
+                                        /* low_4gb= */ false,
                                         &error_msg);
   if (UNLIKELY(!mem_map.IsValid())) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 0dbafde..8cc0c4e 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -92,10 +92,10 @@
   size_t max_num_of_pages = max_capacity_ / kPageSize;
   std::string error_msg;
   page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
-                                           /* addr */ nullptr,
+                                           /* addr= */ nullptr,
                                            RoundUp(max_num_of_pages, kPageSize),
                                            PROT_READ | PROT_WRITE,
-                                           /* low_4gb */ false,
+                                           /* low_4gb= */ false,
                                            &error_msg);
   CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
   page_map_ = page_map_mem_map_.Begin();
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 3095f9f..8fd235f 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -76,8 +76,8 @@
     // we can avoid an expensive CAS.
     // For the baker case, an object is marked if either the mark bit marked or the bitmap bit is
     // set.
-    success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::NonGrayState(),
-                                             /* rb_state */ ReadBarrier::GrayState());
+    success = ref->AtomicSetReadBarrierState(/* expected_rb_state= */ ReadBarrier::NonGrayState(),
+                                             /* rb_state= */ ReadBarrier::GrayState());
   } else {
     success = !bitmap->AtomicTestAndSet(ref);
   }
@@ -113,8 +113,8 @@
     }
     // This may or may not succeed, which is ok because the object may already be gray.
     bool success =
-        ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::NonGrayState(),
-                                       /* rb_state */ ReadBarrier::GrayState());
+        ref->AtomicSetReadBarrierState(/* expected_rb_state= */ ReadBarrier::NonGrayState(),
+                                       /* rb_state= */ ReadBarrier::GrayState());
     if (success) {
       MutexLock mu(self, immune_gray_stack_lock_);
       immune_gray_stack_.push_back(ref);
@@ -186,7 +186,7 @@
         region_space_->Unprotect();
         LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(holder, offset, from_ref);
         region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
-        heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
+        heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true);
         UNREACHABLE();
     }
   } else {
@@ -209,8 +209,8 @@
   if (UNLIKELY(mark_from_read_barrier_measurements_)) {
     ret = MarkFromReadBarrierWithMeasurements(self, from_ref);
   } else {
-    ret = Mark</*kGrayImmuneObject*/true, /*kNoUnEvac*/false, /*kFromGCThread*/false>(self,
-                                                                                      from_ref);
+    ret = Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self,
+                                                                                         from_ref);
   }
   // Only set the mark bit for baker barrier.
   if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 46cc79c..2ae4676 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -135,10 +135,10 @@
     std::string error_msg;
     sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
         "concurrent copying sweep array free buffer",
-        /* addr */ nullptr,
+        /* addr= */ nullptr,
         RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
         PROT_READ | PROT_WRITE,
-        /* low_4gb */ false,
+        /* low_4gb= */ false,
         &error_msg);
     CHECK(sweep_array_free_buffer_mem_map_.IsValid())
         << "Couldn't allocate sweep array free buffer: " << error_msg;
@@ -488,7 +488,7 @@
       TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
       // Only change live bytes for full CC.
       cc->region_space_->SetFromSpace(
-          cc->rb_table_, evac_mode, /*clear_live_bytes*/ !cc->young_gen_);
+          cc->rb_table_, evac_mode, /*clear_live_bytes=*/ !cc->young_gen_);
     }
     cc->SwapStacks();
     if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
@@ -601,7 +601,7 @@
         REQUIRES_SHARED(Locks::mutator_lock_) {
       // If an object is not gray, it should only have references to things in the immune spaces.
       if (obj->GetReadBarrierState() != ReadBarrier::GrayState()) {
-        obj->VisitReferences</*kVisitNativeRoots*/true,
+        obj->VisitReferences</*kVisitNativeRoots=*/true,
                              kDefaultVerifyFlags,
                              kWithoutReadBarrier>(visitor, visitor);
       }
@@ -669,8 +669,8 @@
     // Objects on clean cards should never have references to newly allocated regions. Note
     // that aged cards are also not clean.
     if (heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
-      VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder*/ obj);
-      obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
+      VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder=*/ obj);
+      obj->VisitReferences</*kVisitNativeRoots=*/true, kVerifyNone, kWithoutReadBarrier>(
           internal_visitor, internal_visitor);
     }
   };
@@ -742,7 +742,7 @@
   TimingLogger::ScopedTiming split("GrayAllDirtyImmuneObjects", GetTimings());
   accounting::CardTable* const card_table = heap_->GetCardTable();
   Thread* const self = Thread::Current();
-  using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ true>;
+  using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ true>;
   VisitorType visitor(self);
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
@@ -769,11 +769,11 @@
                 : card;
           },
           /* card modified visitor */ VoidFunctor());
-      card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
-                                               space->Begin(),
-                                               space->End(),
-                                               visitor,
-                                               gc::accounting::CardTable::kCardAged);
+      card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(),
+                                              space->Begin(),
+                                              space->End(),
+                                              visitor,
+                                              gc::accounting::CardTable::kCardAged);
     }
   }
 }
@@ -781,7 +781,7 @@
 void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() {
   TimingLogger::ScopedTiming split("(Paused)GrayAllNewlyDirtyImmuneObjects", GetTimings());
   accounting::CardTable* const card_table = heap_->GetCardTable();
-  using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ false>;
+  using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ false>;
   Thread* const self = Thread::Current();
   VisitorType visitor(self);
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
@@ -791,11 +791,11 @@
 
     // Don't need to scan aged cards since we did these before the pause. Note that scanning cards
     // also handles the mod-union table cards.
-    card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
-                                             space->Begin(),
-                                             space->End(),
-                                             visitor,
-                                             gc::accounting::CardTable::kCardDirty);
+    card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(),
+                                            space->Begin(),
+                                            space->End(),
+                                            visitor,
+                                            gc::accounting::CardTable::kCardDirty);
     if (table != nullptr) {
       // Add the cards to the mod-union table so that we can clear cards to save RAM.
       table->ProcessCards();
@@ -1376,7 +1376,7 @@
     space::RegionSpace* region_space = RegionSpace();
     CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
     VerifyNoFromSpaceRefsFieldVisitor visitor(this);
-    obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+    obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
         visitor,
         visitor);
     if (kUseBakerReadBarrier) {
@@ -1558,8 +1558,8 @@
   MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
   if (mark_stack_mode == kMarkStackModeThreadLocal) {
     // Process the thread-local mark stacks and the GC mark stack.
-    count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false,
-                                          /* checkpoint_callback */ nullptr);
+    count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false,
+                                          /* checkpoint_callback= */ nullptr);
     while (!gc_mark_stack_->IsEmpty()) {
       mirror::Object* to_ref = gc_mark_stack_->PopBack();
       ProcessMarkStackRef(to_ref);
@@ -1734,7 +1734,7 @@
     CHECK(!region_space->IsInFromSpace(to_ref)) << "Scanning object " << to_ref << " in from space";
     AssertToSpaceInvariant(nullptr, MemberOffset(0), to_ref);
     AssertToSpaceInvariantFieldVisitor visitor(this);
-    to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+    to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
         visitor,
         visitor);
   }
@@ -1769,7 +1769,7 @@
   DisableWeakRefAccessCallback dwrac(this);
   // Process the thread local mark stacks one last time after switching to the shared mark stack
   // mode and disable weak ref accesses.
-  ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ true, &dwrac);
+  ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true, &dwrac);
   if (kVerboseMode) {
     LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
   }
@@ -1833,7 +1833,7 @@
 void ConcurrentCopying::Sweep(bool swap_bitmaps) {
   if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
     // Only sweep objects on the live stack.
-    SweepArray(heap_->GetLiveStack(), /* swap_bitmaps */ false);
+    SweepArray(heap_->GetLiveStack(), /* swap_bitmaps= */ false);
   } else {
     {
       TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
@@ -2060,7 +2060,7 @@
 
   {
     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    Sweep(/* swap_bitmaps */ false);
+    Sweep(/* swap_bitmaps= */ false);
     SwapBitmaps();
     heap_->UnBindBitmaps();
 
@@ -2171,7 +2171,7 @@
         LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
         region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
         PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
-        MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+        MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
         LOG(FATAL) << "Invalid reference " << ref
                    << " referenced from object " << obj << " at offset " << offset;
       }
@@ -2264,12 +2264,12 @@
         LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
         region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
         PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
-        MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+        MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
         LOG(FATAL) << "Invalid reference " << ref;
       }
     } else {
       // Check to-space invariant in non-moving space.
-      AssertToSpaceInvariantInNonMovingSpace(/* obj */ nullptr, ref);
+      AssertToSpaceInvariantInNonMovingSpace(/* obj= */ nullptr, ref);
     }
   }
 }
@@ -2440,7 +2440,7 @@
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
       ALWAYS_INLINE
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    collector_->MarkRoot</*kGrayImmuneObject*/false>(thread_, root);
+    collector_->MarkRoot</*kGrayImmuneObject=*/false>(thread_, root);
   }
 
  private:
@@ -2462,7 +2462,7 @@
   DCHECK_EQ(Thread::Current(), thread_running_gc_);
   RefFieldsVisitor<kNoUnEvac> visitor(this, thread_running_gc_);
   // Disable the read barrier for a performance reason.
-  to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+  to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
       visitor, visitor);
   if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
     thread_running_gc_->ModifyDebugDisallowReadBarrier(-1);
@@ -2476,10 +2476,10 @@
   DCHECK_EQ(Thread::Current(), thread_running_gc_);
   mirror::Object* ref = obj->GetFieldObject<
       mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
-  mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, kNoUnEvac, /*kFromGCThread*/true>(
+  mirror::Object* to_ref = Mark</*kGrayImmuneObject=*/false, kNoUnEvac, /*kFromGCThread=*/true>(
       thread_running_gc_,
       ref,
-      /*holder*/ obj,
+      /*holder=*/ obj,
       offset);
   if (to_ref == ref) {
     return;
@@ -2553,7 +2553,7 @@
     mirror::CompressedReference<mirror::Object>* const root = roots[i];
     if (!root->IsNull()) {
       // kGrayImmuneObject is true because this is used for the thread flip.
-      MarkRoot</*kGrayImmuneObject*/true>(self, root);
+      MarkRoot</*kGrayImmuneObject=*/true>(self, root);
     }
   }
 }
@@ -2702,7 +2702,7 @@
   if (UNLIKELY(klass == nullptr)) {
     // Remove memory protection from the region space and log debugging information.
     region_space_->Unprotect();
-    heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
+    heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true);
   }
   // There must not be a read barrier to avoid nested RB that might violate the to-space invariant.
   // Note that from_ref is a from space ref so the SizeOf() call will access the from-space meta
@@ -2716,7 +2716,7 @@
   size_t bytes_allocated = 0U;
   size_t dummy;
   bool fall_back_to_non_moving = false;
-  mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac*/ true>(
+  mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac=*/ true>(
       region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
   bytes_allocated = region_space_bytes_allocated;
   if (LIKELY(to_ref != nullptr)) {
@@ -2790,7 +2790,7 @@
         DCHECK(region_space_->IsInToSpace(to_ref));
         if (bytes_allocated > space::RegionSpace::kRegionSize) {
           // Free the large alloc.
-          region_space_->FreeLarge</*kForEvac*/ true>(to_ref, bytes_allocated);
+          region_space_->FreeLarge</*kForEvac=*/ true>(to_ref, bytes_allocated);
         } else {
           // Record the lost copy for later reuse.
           heap_->num_bytes_allocated_.fetch_add(bytes_allocated, std::memory_order_relaxed);
@@ -3017,7 +3017,7 @@
         // AtomicSetReadBarrierState since it will fault if the address is not
         // valid.
         region_space_->Unprotect();
-        heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal */ true);
+        heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal= */ true);
       }
       // Not marked nor on the allocation stack. Try to mark it.
       // This may or may not succeed, which is ok.
@@ -3131,7 +3131,7 @@
       } while (!field->CasWeakRelaxed(from_ref, to_ref));
     } else {
       // TODO: Why is this seq_cst when the above is relaxed? Document memory ordering.
-      field->Assign</* kIsVolatile */ true>(to_ref);
+      field->Assign</* kIsVolatile= */ true>(to_ref);
     }
   }
   return true;
@@ -3151,7 +3151,7 @@
   // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   GetHeap()->GetReferenceProcessor()->ProcessReferences(
-      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
+      /*concurrent=*/ true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
 }
 
 void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
@@ -3169,7 +3169,8 @@
   ScopedTrace tr(__FUNCTION__);
   const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
   mirror::Object* ret =
-      Mark</*kGrayImmuneObject*/true, /*kNoUnEvac*/false, /*kFromGCThread*/false>(self, from_ref);
+      Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self,
+                                                                                     from_ref);
   if (measure_read_barrier_slow_path_) {
     rb_slow_path_ns_.fetch_add(NanoTime() - start_time, std::memory_order_relaxed);
   }
diff --git a/runtime/gc/collector/immune_spaces.cc b/runtime/gc/collector/immune_spaces.cc
index 3b59618..3c20e51 100644
--- a/runtime/gc/collector/immune_spaces.cc
+++ b/runtime/gc/collector/immune_spaces.cc
@@ -57,7 +57,7 @@
       if (image_oat_file != nullptr) {
         intervals.push_back(Interval(reinterpret_cast<uintptr_t>(image_oat_file->Begin()),
                                      reinterpret_cast<uintptr_t>(image_oat_file->End()),
-                                     /*image*/false));
+                                     /*image=*/false));
       }
     }
     intervals.push_back(Interval(space_begin, space_end, /*is_heap*/true));
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 3f85c71..0e5fac1 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -32,7 +32,7 @@
 
 class DummyOatFile : public OatFile {
  public:
-  DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*is_executable*/ false) {
+  DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*executable=*/ false) {
     begin_ = begin;
     end_ = end;
   }
@@ -45,7 +45,7 @@
                   std::unique_ptr<DummyOatFile>&& oat_file,
                   MemMap&& oat_map)
       : ImageSpace("DummyImageSpace",
-                   /*image_location*/"",
+                   /*image_location=*/"",
                    std::move(map),
                    std::move(live_bitmap),
                    map.End()),
@@ -87,7 +87,7 @@
                                       image_begin,
                                       image_size,
                                       PROT_READ | PROT_WRITE,
-                                      /*low_4gb*/true,
+                                      /*low_4gb=*/true,
                                       &error_str);
     if (!map.IsValid()) {
       LOG(ERROR) << error_str;
@@ -100,7 +100,7 @@
                                           oat_begin,
                                           oat_size,
                                           PROT_READ | PROT_WRITE,
-                                          /*low_4gb*/true,
+                                          /*low_4gb=*/true,
                                           &error_str);
     if (!oat_map.IsValid()) {
       LOG(ERROR) << error_str;
@@ -110,23 +110,23 @@
     // Create image header.
     ImageSection sections[ImageHeader::kSectionCount];
     new (map.Begin()) ImageHeader(
-        /*image_begin*/PointerToLowMemUInt32(map.Begin()),
-        /*image_size*/map.Size(),
+        /*image_begin=*/PointerToLowMemUInt32(map.Begin()),
+        /*image_size=*/map.Size(),
         sections,
-        /*image_roots*/PointerToLowMemUInt32(map.Begin()) + 1,
-        /*oat_checksum*/0u,
+        /*image_roots=*/PointerToLowMemUInt32(map.Begin()) + 1,
+        /*oat_checksum=*/0u,
         // The oat file data in the header is always right after the image space.
-        /*oat_file_begin*/PointerToLowMemUInt32(oat_begin),
-        /*oat_data_begin*/PointerToLowMemUInt32(oat_begin),
-        /*oat_data_end*/PointerToLowMemUInt32(oat_begin + oat_size),
-        /*oat_file_end*/PointerToLowMemUInt32(oat_begin + oat_size),
-        /*boot_image_begin*/0u,
-        /*boot_image_size*/0u,
-        /*boot_oat_begin*/0u,
-        /*boot_oat_size*/0u,
-        /*pointer_size*/sizeof(void*),
+        /*oat_file_begin=*/PointerToLowMemUInt32(oat_begin),
+        /*oat_data_begin=*/PointerToLowMemUInt32(oat_begin),
+        /*oat_data_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
+        /*oat_file_end=*/PointerToLowMemUInt32(oat_begin + oat_size),
+        /*boot_image_begin=*/0u,
+        /*boot_image_size=*/0u,
+        /*boot_oat_begin=*/0u,
+        /*boot_oat_size=*/0u,
+        /*pointer_size=*/sizeof(void*),
         ImageHeader::kStorageModeUncompressed,
-        /*storage_size*/0u);
+        /*data_size=*/0u);
     return new DummyImageSpace(std::move(map),
                                std::move(live_bitmap),
                                std::move(oat_file),
@@ -138,10 +138,10 @@
   static uint8_t* GetContinuousMemoryRegion(size_t size) {
     std::string error_str;
     MemMap map = MemMap::MapAnonymous("reserve",
-                                      /* addr */ nullptr,
+                                      /* addr= */ nullptr,
                                       size,
                                       PROT_READ | PROT_WRITE,
-                                      /*low_4gb*/ true,
+                                      /*low_4gb=*/ true,
                                       &error_str);
     if (!map.IsValid()) {
       LOG(ERROR) << "Failed to allocate memory region " << error_str;
@@ -163,7 +163,7 @@
                         space::kGcRetentionPolicyNeverCollect,
                         begin,
                         end,
-                        /*limit*/end) {}
+                        /*limit=*/end) {}
 
   space::SpaceType GetType() const override {
     return space::kSpaceTypeMallocSpace;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 5f44a72..399f9ff 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -105,10 +105,10 @@
   std::string error_msg;
   sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
       "mark sweep sweep array free buffer",
-      /* addr */ nullptr,
+      /* addr= */ nullptr,
       RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
       PROT_READ | PROT_WRITE,
-      /* low_4gb */ false,
+      /* low_4gb= */ false,
       &error_msg);
   CHECK(sweep_array_free_buffer_mem_map_.IsValid())
       << "Couldn't allocate sweep array free buffer: " << error_msg;
@@ -283,9 +283,9 @@
   // cards (during the call to Heap::ProcessCard) are not reordered
   // *after* marking actually starts?
   heap_->ProcessCards(GetTimings(),
-                      /* use_rem_sets */ false,
-                      /* process_alloc_space_cards */ true,
-                      /* clear_alloc_space_cards */ GetGcType() != kGcTypeSticky);
+                      /* use_rem_sets= */ false,
+                      /* process_alloc_space_cards= */ true,
+                      /* clear_alloc_space_cards= */ GetGcType() != kGcTypeSticky);
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   MarkRoots(self);
   MarkReachableObjects();
@@ -446,7 +446,7 @@
                      !large_object_space->Contains(obj)))) {
       // Lowest priority logging first:
       PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
-      MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+      MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
       // Buffer the output in the string stream since it is more important than the stack traces
       // and we want it to have log priority. The stack traces are printed from Runtime::Abort
       // which is called from LOG(FATAL) but before the abort message.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index c58b59d..19b1fc7 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -728,7 +728,7 @@
   DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
   MarkObjectVisitor visitor(this);
   // Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build.)
-  obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+  obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
       visitor, visitor);
 }
 
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index af9000b..e253dfb 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -129,10 +129,10 @@
         if (!self->IsExceptionPending()) {
           // AllocObject will pick up the new allocator type, and instrumented as true is the safe
           // default.
-          return AllocObject</*kInstrumented*/true>(self,
-                                                    klass,
-                                                    byte_count,
-                                                    pre_fence_visitor);
+          return AllocObject</*kInstrumented=*/true>(self,
+                                                     klass,
+                                                     byte_count,
+                                                     pre_fence_visitor);
         }
         return nullptr;
       }
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 78e8422..a31cbe7 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -433,8 +433,8 @@
           request_begin,
           capacity_,
           PROT_READ | PROT_WRITE,
-          /* low_4gb */ true,
-          /* reuse */ false,
+          /* low_4gb= */ true,
+          /* reuse= */ false,
           heap_reservation.IsValid() ? &heap_reservation : nullptr,
           &error_str);
     }
@@ -463,7 +463,7 @@
                                                                initial_size,
                                                                size,
                                                                size,
-                                                               /* can_move_objects */ false);
+                                                               /* can_move_objects= */ false);
     CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
         << non_moving_space_mem_map_begin;
     non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
@@ -505,11 +505,11 @@
       // Create bump pointer spaces instead of a backup space.
       main_mem_map_2.Reset();
       bump_pointer_space_ = space::BumpPointerSpace::Create(
-          "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
+          "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
       CHECK(bump_pointer_space_ != nullptr);
       AddSpace(bump_pointer_space_);
       temp_space_ = space::BumpPointerSpace::Create(
-          "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
+          "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr);
       CHECK(temp_space_ != nullptr);
       AddSpace(temp_space_);
     } else if (main_mem_map_2.IsValid()) {
@@ -519,7 +519,7 @@
                                                            growth_limit_,
                                                            capacity_,
                                                            name,
-                                                           /* can_move_objects */ true));
+                                                           /* can_move_objects= */ true));
       CHECK(main_space_backup_.get() != nullptr);
       // Add the space so its accounted for in the heap_begin and heap_end.
       AddSpace(main_space_backup_.get());
@@ -634,13 +634,13 @@
     }
     if (MayUseCollector(kCollectorTypeCC)) {
       concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
-                                                                       /*young_gen*/false,
+                                                                       /*young_gen=*/false,
                                                                        "",
                                                                        measure_gc_performance);
       if (kEnableGenerationalConcurrentCopyingCollection) {
         young_concurrent_copying_collector_ = new collector::ConcurrentCopying(
             this,
-            /*young_gen*/true,
+            /*young_gen=*/true,
             "young",
             measure_gc_performance);
       }
@@ -671,7 +671,7 @@
     bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
     if (!no_gap) {
       PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
-      MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse */ true);
+      MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse= */ true);
       LOG(FATAL) << "There's a gap between the image space and the non-moving space";
     }
   }
@@ -696,7 +696,7 @@
                                       request_begin,
                                       capacity,
                                       PROT_READ | PROT_WRITE,
-                                      /* low_4gb*/ true,
+                                      /* low_4gb=*/ true,
                                       out_error_str);
     if (map.IsValid() || request_begin == nullptr) {
       return map;
@@ -1323,7 +1323,7 @@
       // Invoke CC full compaction.
       CollectGarbageInternal(collector::kGcTypeFull,
                              kGcCauseCollectorTransition,
-                             /*clear_soft_references*/false);
+                             /*clear_soft_references=*/false);
     } else {
       VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
     }
@@ -1783,7 +1783,7 @@
           break;
         }
         // Try to transition the heap if the allocation failure was due to the space being full.
-        if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
+        if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow=*/ false)) {
           // If we aren't out of memory then the OOM was probably from the non moving space being
           // full. Attempt to disable compaction and turn the main space into a non moving space.
           DisableMovingGc();
@@ -3870,7 +3870,7 @@
     // Trigger another GC because there have been enough native bytes
     // allocated since the last GC.
     if (IsGcConcurrent()) {
-      RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full*/true);
+      RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full=*/true);
     } else {
       CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
     }
@@ -3916,7 +3916,7 @@
       << " IsVariableSize=" << c->IsVariableSize()
       << " ObjectSize=" << c->GetObjectSize()
       << " sizeof(Class)=" << sizeof(mirror::Class)
-      << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag*/ "klass");
+      << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag=*/ "klass");
   CHECK_GE(byte_count, sizeof(mirror::Object));
 }
 
@@ -4012,7 +4012,7 @@
     {
       static constexpr size_t kMaxFrames = 16u;
       FixedSizeBacktrace<kMaxFrames> backtrace;
-      backtrace.Collect(/* skip_frames */ 2);
+      backtrace.Collect(/* skip_count= */ 2);
       uint64_t hash = backtrace.Hash();
       MutexLock mu(self, *backtrace_lock_);
       new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
@@ -4023,7 +4023,7 @@
     if (new_backtrace) {
       StackHandleScope<1> hs(self);
       auto h = hs.NewHandleWrapper(obj);
-      CollectGarbage(/* clear_soft_references */ false);
+      CollectGarbage(/* clear_soft_references= */ false);
       unique_backtrace_count_.fetch_add(1);
     } else {
       seen_backtrace_count_.fetch_add(1);
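
The hunks above are typical of the bulk of this change: argument comments gain a trailing '=' (the "/* name= */ value" spelling), which is the form clang-tidy's bugprone-argument-comment check can verify against the declared parameter name; the plain "/* name */" form is invisible to the check. A minimal sketch with a hypothetical function:

    // Hypothetical declaration, for illustration only.
    void CollectGarbageSketch(bool clear_soft_references) { (void)clear_soft_references; }

    void CallerSketch() {
      CollectGarbageSketch(/* clear_soft_references= */ false);  // Verified against the parameter.
      // CollectGarbageSketch(/* clear_refs= */ false);          // Would be flagged: name mismatch.
    }
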
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 05a04f2..a133a10 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -37,7 +37,7 @@
                                      gc::Heap::kPreferredAllocSpaceBegin,
                                      16 * KB,
                                      PROT_READ,
-                                     /*low_4gb*/ true,
+                                     /*low_4gb=*/ true,
                                      &error_msg);
     ASSERT_TRUE(reserved_.IsValid()) << error_msg;
     CommonRuntimeTest::SetUp();
@@ -77,7 +77,7 @@
       }
     }
   }
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
 }
 
 TEST_F(HeapTest, HeapBitmapCapacityTest) {
@@ -91,7 +91,7 @@
 }
 
 TEST_F(HeapTest, DumpGCPerformanceOnShutdown) {
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
   Runtime::Current()->SetDumpGCPerformanceOnShutdown(true);
 }
 
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index c212bad..d4af117 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -60,16 +60,16 @@
 static inline void SetSlowPathFlag(bool enabled) REQUIRES_SHARED(Locks::mutator_lock_) {
   ObjPtr<mirror::Class> reference_class = GetClassRoot<mirror::Reference>();
   MemberOffset slow_path_offset = GetSlowPathFlagOffset(reference_class);
-  reference_class->SetFieldBoolean</* kTransactionActive */ false, /* kCheckTransaction */ false>(
+  reference_class->SetFieldBoolean</* kTransactionActive= */ false, /* kCheckTransaction= */ false>(
       slow_path_offset, enabled ? 1 : 0);
 }
 
 void ReferenceProcessor::EnableSlowPath() {
-  SetSlowPathFlag(/* enabled */ true);
+  SetSlowPathFlag(/* enabled= */ true);
 }
 
 void ReferenceProcessor::DisableSlowPath(Thread* self) {
-  SetSlowPathFlag(/* enabled */ false);
+  SetSlowPathFlag(/* enabled= */ false);
   condition_.Broadcast(self);
 }
 
@@ -238,13 +238,13 @@
   mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
   // do_atomic_update needs to be true because this happens outside of the reference processing
   // phase.
-  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
+  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update=*/true)) {
     if (UNLIKELY(collector->IsTransactionActive())) {
       // In transaction mode, keep the referent alive and avoid any reference processing to avoid the
       // issue of rolling back reference processing.  do_atomic_update needs to be true because this
       // happens outside of the reference processing phase.
       if (!referent->IsNull()) {
-        collector->MarkHeapReference(referent, /*do_atomic_update*/ true);
+        collector->MarkHeapReference(referent, /*do_atomic_update=*/ true);
       }
       return;
     }
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index e25e279..5c11e50 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -136,7 +136,7 @@
     mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
     // do_atomic_update is false because this happens during the reference processing phase where
     // Reference.clear() would block.
-    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
+    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
       // Referent is white, clear it.
       if (Runtime::Current()->IsActiveTransaction()) {
         ref->ClearReferent<true>();
@@ -158,7 +158,7 @@
     mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
     // do_atomic_update is false because this happens during the reference processing phase where
     // Reference.clear() would block.
-    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
+    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
       ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
       // Move the updated referent to the zombie field.
       if (Runtime::Current()->IsActiveTransaction()) {
@@ -187,7 +187,7 @@
     if (referent_addr->AsMirrorPtr() != nullptr) {
       // do_atomic_update is false because mutators can't access the referent due to the weak ref
       // access blocking.
-      visitor->MarkHeapReference(referent_addr, /*do_atomic_update*/ false);
+      visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ false);
     }
     ref = ref->GetPendingNext();
   } while (LIKELY(ref != head));
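
The do_atomic_update comments in this file and the previous one encode a concurrency contract: when mutators can race on the referent slot (outside the reference-processing phase), the collector must install the marked pointer with a compare-and-swap; when weak-reference access is blocked, a plain store is enough. A toy restatement, with names that are illustrative rather than ART's:

    #include <atomic>

    struct Obj {};

    // Sketch: install 'forwarded' into a GC-visible slot. With concurrent
    // mutators (do_atomic_update == true) a CAS avoids clobbering a racing
    // write; otherwise a relaxed store suffices.
    void MarkSlotSketch(std::atomic<Obj*>* slot, Obj* forwarded, bool do_atomic_update) {
      if (do_atomic_update) {
        Obj* expected = slot->load(std::memory_order_relaxed);
        slot->compare_exchange_strong(expected, forwarded, std::memory_order_release);
      } else {
        slot->store(forwarded, std::memory_order_relaxed);
      }
    }
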
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 80af700..497a0c2 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -32,7 +32,7 @@
                                         requested_begin,
                                         capacity,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ true,
+                                        /* low_4gb= */ true,
                                         &error_msg);
   if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 36d2161..73582a0 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -54,7 +54,7 @@
                   end,
                   limit,
                   growth_limit,
-                  /* create_bitmaps */ true,
+                  /* create_bitmaps= */ true,
                   can_move_objects,
                   starting_size, initial_size),
       mspace_(mspace) {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 6e6023d..9e67957 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -509,21 +509,11 @@
     const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(),
                                                kPageSize);
     const size_t end_of_bitmap = image_bitmap_offset + bitmap_section.Size();
-    const ImageSection& relocations_section = image_header->GetImageRelocationsSection();
-    if (relocations_section.Offset() != bitmap_section.Offset() + bitmap_section.Size()) {
+    if (end_of_bitmap != image_file_size) {
       *error_msg = StringPrintf(
-          "Relocations do not start immediately after bitmap: %u vs. %u + %u.",
-          relocations_section.Offset(),
-          bitmap_section.Offset(),
-          bitmap_section.Size());
-      return nullptr;
-    }
-    const size_t end_of_relocations = end_of_bitmap + relocations_section.Size();
-    if (end_of_relocations != image_file_size) {
-      *error_msg = StringPrintf(
-          "Image file size does not equal end of relocations: size=%" PRIu64 " vs. %zu.",
+          "Image file size does not equal end of bitmap: size=%" PRIu64 " vs. %zu.",
           image_file_size,
-          end_of_relocations);
+          end_of_bitmap);
       return nullptr;
     }
 
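With the relocation section gone, the validation above collapses to a single equality: the page-aligned header-plus-data block, followed by the bitmap, must end exactly at the file size. The arithmetic, restated as a self-contained sketch (the page size constant is an assumption):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kPageSizeSketch = 4096;
    constexpr size_t RoundUpSketch(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

    // Mirrors the check above: the bitmap starts at the page-rounded end of
    // header+data and must run exactly to the end of the file.
    bool ImageFileSizeOkSketch(size_t header_plus_data, size_t bitmap_size, uint64_t file_size) {
      const size_t end_of_bitmap = RoundUpSketch(header_plus_data, kPageSizeSketch) + bitmap_size;
      return end_of_bitmap == file_size;
    }
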
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index b783cfe..a7f82f6 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -137,10 +137,10 @@
                                            size_t* bytes_tl_bulk_allocated) {
   std::string error_msg;
   MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
-                                        /* addr */ nullptr,
+                                        /* addr= */ nullptr,
                                         num_bytes,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ true,
+                                        /* low_4gb= */ true,
                                         &error_msg);
   if (UNLIKELY(!mem_map.IsValid())) {
     LOG(WARNING) << "Large object allocation failed: " << error_msg;
@@ -353,7 +353,7 @@
                                         requested_begin,
                                         size,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ true,
+                                        /* low_4gb= */ true,
                                         &error_msg);
   CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
   return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
@@ -372,10 +372,10 @@
   std::string error_msg;
   allocation_info_map_ =
       MemMap::MapAnonymous("large object free list space allocation info map",
-                           /* addr */ nullptr,
+                           /* addr= */ nullptr,
                            alloc_info_size,
                            PROT_READ | PROT_WRITE,
-                           /* low_4gb */ false,
+                           /* low_4gb= */ false,
                            &error_msg);
   CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map: " << error_msg;
   allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 445560a..be75efe 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -109,7 +109,7 @@
                                         requested_begin,
                                         *capacity,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ true,
+                                        /* low_4gb= */ true,
                                         &error_msg);
   if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index bda1f1c..8cb079d 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -409,7 +409,7 @@
     } else {
       DCHECK(reg->IsLargeTail());
     }
-    reg->Clear(/*zero_and_release_pages*/true);
+    reg->Clear(/*zero_and_release_pages=*/true);
     if (kForEvac) {
       --num_evac_regions_;
     } else {
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index eba6fac..31bbfb8 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -58,7 +58,7 @@
                                    requested_begin,
                                    capacity + kRegionSize,
                                    PROT_READ | PROT_WRITE,
-                                   /* low_4gb */ true,
+                                   /* low_4gb= */ true,
                                    &error_msg);
     if (mem_map.IsValid() || requested_begin == nullptr) {
       break;
@@ -393,7 +393,7 @@
   uint8_t* clear_block_begin = nullptr;
   uint8_t* clear_block_end = nullptr;
   auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
-    r->Clear(/*zero_and_release_pages*/false);
+    r->Clear(/*zero_and_release_pages=*/false);
     if (clear_block_end != r->Begin()) {
       // Region `r` is not adjacent to the current clear block; zero and release
       // pages within the current block and restart a new clear block at the
@@ -656,7 +656,7 @@
     if (!r->IsFree()) {
       --num_non_free_regions_;
     }
-    r->Clear(/*zero_and_release_pages*/true);
+    r->Clear(/*zero_and_release_pages=*/true);
   }
   SetNonFreeRegionLimit(0);
   DCHECK_EQ(num_non_free_regions_, 0u);
@@ -735,7 +735,7 @@
   RevokeThreadLocalBuffersLocked(self);
   // Retain sufficient free regions for full evacuation.
 
-  Region* r = AllocateRegion(/*for_evac*/ false);
+  Region* r = AllocateRegion(/*for_evac=*/ false);
   if (r != nullptr) {
     r->is_a_tlab_ = true;
     r->thread_ = self;
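
The clear_region lambda above batches its work: regions are accumulated into one contiguous clear block, and pages are only zeroed and released when a non-adjacent region breaks the run. A generic sketch of that coalescing pattern (the Range type and release callback are illustrative, not ART's):

    #include <cstdint>
    #include <vector>

    struct RangeSketch { uint8_t* begin; uint8_t* end; };

    // Merge adjacent ranges so the release callback (e.g. a madvise-style
    // zero-and-release) runs once per contiguous block rather than per region.
    template <typename ReleaseFn>
    void ClearCoalescedSketch(const std::vector<RangeSketch>& regions, ReleaseFn&& release) {
      uint8_t* block_begin = nullptr;
      uint8_t* block_end = nullptr;
      for (const RangeSketch& r : regions) {
        if (block_begin != nullptr && r.begin != block_end) {
          release(block_begin, block_end);  // Flush the finished block.
          block_begin = nullptr;
        }
        if (block_begin == nullptr) block_begin = r.begin;
        block_end = r.end;
      }
      if (block_begin != nullptr) release(block_begin, block_end);
    }
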
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 5af1dd3..cc371b8 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -206,12 +206,12 @@
   // Go through all of the blocks and visit the continuous objects.
   template <typename Visitor>
   ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) {
-    WalkInternal<false /* kToSpaceOnly */>(visitor);
+    WalkInternal</* kToSpaceOnly= */ false>(visitor);
   }
   template <typename Visitor>
   ALWAYS_INLINE void WalkToSpace(Visitor&& visitor)
       REQUIRES(Locks::mutator_lock_) {
-    WalkInternal<true /* kToSpaceOnly */>(visitor);
+    WalkInternal</* kToSpaceOnly= */ true>(visitor);
   }
 
   accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index 07725b9..4fe8027 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -112,6 +112,8 @@
   switch (type) {
     case CollectorType::kCollectorTypeCMS:
     case CollectorType::kCollectorTypeCC:
+    case CollectorType::kCollectorTypeSS:
+    case CollectorType::kCollectorTypeGSS:
       return true;
 
     default:
@@ -143,7 +145,7 @@
   cswh.Set(GcRoot<mirror::Object>(s.Get()));
 
   // Trigger a GC.
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
 
   // Expect the holder to have been called.
   EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -164,7 +166,7 @@
   cswh.Set(GcRoot<mirror::Object>(mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
 
   // Trigger a GC.
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
 
   // Expect the holder to have been called.
   EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -188,7 +190,7 @@
   cswh.Set(GcRoot<mirror::Object>(s.Get()));
 
   // Trigger a GC.
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
 
   // Expect the holder to have been called.
   ASSERT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
@@ -203,7 +205,7 @@
   Runtime::Current()->RemoveSystemWeakHolder(&cswh);
 
   // Trigger another GC.
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
 
   // Expectation: no change in the numbers.
   EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index 0281eee..47c54bd 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -87,7 +87,7 @@
                                      bool fatal) const {
   // Lowest priority logging first:
   PrintFileToLog("/proc/self/maps", android::base::LogSeverity::FATAL_WITHOUT_ABORT);
-  MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+  MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
   Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
   // Buffer the output in the string stream since it is more important than the stack traces
   // and we want it to have log priority. The stack traces are printed from Runtime::Abort
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index d091e7f..f61c700 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -199,7 +199,7 @@
 inline VariableSizedHandleScope::VariableSizedHandleScope(Thread* const self)
     : BaseHandleScope(self->GetTopHandleScope()),
       self_(self) {
-  current_scope_ = new LocalScopeType(/*link*/ nullptr);
+  current_scope_ = new LocalScopeType(/*link=*/ nullptr);
   self_->PushHandleScope(this);
 }
 
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index 580224e..c16e7f3 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -242,9 +242,9 @@
                               AccessMethod access_method)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   bool is_caller_trusted =
-      detail::IsCallerTrusted(/* caller */ nullptr, caller_class_loader, caller_dex_cache);
+      detail::IsCallerTrusted(/* caller= */ nullptr, caller_class_loader, caller_dex_cache);
   return GetMemberAction(member,
-                         /* thread */ nullptr,
+                         /* thread= */ nullptr,
                          [is_caller_trusted] (Thread*) { return is_caller_trusted; },
                          access_method);
 }
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 438af12..832bacb 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -41,6 +41,7 @@
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/array_ref.h"
+#include "base/file_utils.h"
 #include "base/globals.h"
 #include "base/macros.h"
 #include "base/mutex.h"
@@ -761,13 +762,13 @@
     // Where exactly are we writing to?
     int out_fd;
     if (fd_ >= 0) {
-      out_fd = dup(fd_);
+      out_fd = DupCloexec(fd_);
       if (out_fd < 0) {
         ThrowRuntimeException("Couldn't dump heap; dup(%d) failed: %s", fd_, strerror(errno));
         return false;
       }
     } else {
-      out_fd = open(filename_.c_str(), O_WRONLY|O_CREAT|O_TRUNC, 0644);
+      out_fd = open(filename_.c_str(), O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
       if (out_fd < 0) {
         ThrowRuntimeException("Couldn't dump heap; open(\"%s\") failed: %s", filename_.c_str(),
                               strerror(errno));
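
This file shows the android-cloexec pair of fixes applied throughout the change: open() gains O_CLOEXEC, and raw dup() becomes DupCloexec(), since dup() does not carry the close-on-exec flag and the duplicate would otherwise leak into any child process spawned across exec(). A sketch of the idiom (assuming art::DupCloexec wraps something along these lines):

    #include <fcntl.h>

    // Duplicate a descriptor without losing close-on-exec: F_DUPFD_CLOEXEC
    // atomically sets the flag on the new fd, unlike plain dup().
    int DupCloexecSketch(int fd) {
      return fcntl(fd, F_DUPFD_CLOEXEC, 0);  // 0 = lowest fd number to search from.
    }
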
diff --git a/runtime/image.cc b/runtime/image.cc
index a4351d0..e7f4486 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
 namespace art {
 
 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '4', '\0' };  // Remove PIC flags.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '5', '\0' };  // Remove relocation section.
 
 ImageHeader::ImageHeader(uint32_t image_begin,
                          uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index bd8bc28..0496650 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -237,7 +237,6 @@
     kSectionClassTable,
     kSectionStringReferenceOffsets,
     kSectionImageBitmap,
-    kSectionImageRelocations,
     kSectionCount,  // Number of elements in enum.
   };
 
@@ -294,10 +293,6 @@
     return GetImageSection(kSectionImageBitmap);
   }
 
-  const ImageSection& GetImageRelocationsSection() const {
-    return GetImageSection(kSectionImageRelocations);
-  }
-
   const ImageSection& GetImageStringReferenceOffsetsSection() const {
     return GetImageSection(kSectionStringReferenceOffsets);
   }
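
Dropping kSectionImageRelocations changes the on-disk image layout, which is why image.cc bumps kImageVersion from '064' to '065': a loader rejects images whose version bytes disagree, so files written under the old layout cannot be misparsed. A toy version of that gate (the comparison logic is illustrative; the magic and version values are the ones in this diff):

    #include <cstdint>
    #include <cstring>

    bool ValidateImageHeaderSketch(const uint8_t magic[4], const uint8_t version[4]) {
      static const uint8_t kMagic[4] = { 'a', 'r', 't', '\n' };
      static const uint8_t kVersion[4] = { '0', '6', '5', '\0' };
      return std::memcmp(magic, kMagic, sizeof(kMagic)) == 0 &&
             std::memcmp(version, kVersion, sizeof(kVersion)) == 0;
    }
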
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index d205225..6db4790 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -80,10 +80,10 @@
 
   const size_t table_bytes = max_count * sizeof(IrtEntry);
   table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
-                                        /* addr */ nullptr,
+                                        /* addr= */ nullptr,
                                         table_bytes,
                                         PROT_READ | PROT_WRITE,
-                                        /* low_4gb */ false,
+                                        /* low_4gb= */ false,
                                         error_msg);
   if (!table_mem_map_.IsValid() && error_msg->empty()) {
     *error_msg = "Unable to map memory for indirect ref table";
@@ -223,10 +223,10 @@
 
   const size_t table_bytes = new_size * sizeof(IrtEntry);
   MemMap new_map = MemMap::MapAnonymous("indirect ref table",
-                                        /* addr */ nullptr,
+                                        /* addr= */ nullptr,
                                         table_bytes,
                                         PROT_READ | PROT_WRITE,
-                                        /* is_low_4gb */ false,
+                                        /* low_4gb= */ false,
                                         error_msg);
   if (!new_map.IsValid()) {
     return false;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4937132..d533054 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -43,6 +43,7 @@
 #include "mirror/object_array-inl.h"
 #include "nth_caller_visitor.h"
 #include "oat_quick_method_header.h"
+#include "runtime-inl.h"
 #include "thread.h"
 #include "thread_list.h"
 
@@ -536,7 +537,7 @@
   } else {
     list.push_back(listener);
   }
-  *has_listener = true;
+  Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = true; });
 }
 
 void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t events) {
@@ -614,11 +615,11 @@
   // Check if the list contains any non-null listener, and update 'has_listener'.
   for (InstrumentationListener* l : list) {
     if (l != nullptr) {
-      *has_listener = true;
+      Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = true; });
       return;
     }
   }
-  *has_listener = false;
+  Runtime::DoAndMaybeSwitchInterpreter([=](){ *has_listener = false; });
 }
 
 void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t events) {
@@ -1494,8 +1495,8 @@
     DeoptimizationMethodType deopt_method_type = GetDeoptimizationMethodType(method);
     self->PushDeoptimizationContext(return_value,
                                     return_shorty == 'L' || return_shorty == '[',
-                                    nullptr /* no pending exception */,
-                                    false /* from_code */,
+                                    /* exception= */ nullptr,
+                                    /* from_code= */ false,
                                     deopt_method_type);
     return GetTwoWordSuccessValue(*return_pc,
                                   reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
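
The listener-flag stores above now go through Runtime::DoAndMaybeSwitchInterpreter rather than writing the boolean directly. The apparent intent, sketched below with stub types rather than ART's actual implementation, is that the flag write and every thread's cached use-mterp bit change together, so no thread keeps executing mterp after instrumentation has made that choice invalid:

    #include <functional>
    #include <vector>

    struct ThreadStub { bool use_mterp = true; };

    // Assumed shape: apply the change and republish the interpreter choice to
    // all threads in one critical section (cf. the thread_list_lock_ retry in
    // MterpCheckBefore later in this change).
    void DoAndMaybeSwitchInterpreterSketch(std::vector<ThreadStub*>& threads,
                                           const std::function<void()>& apply_change,
                                           const std::function<bool()>& can_use_mterp) {
      apply_change();                    // e.g. [=]() { *has_listener = true; }
      const bool use = can_use_mterp();  // Re-evaluate once after the change.
      for (ThreadStub* t : threads) {
        t->use_mterp = use;
      }
    }
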
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 31cfeb6..d973689 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -509,9 +509,9 @@
   ASSERT_TRUE(method->IsDirect());
   ASSERT_TRUE(method->GetDeclaringClass() == klass);
   TestEvent(instrumentation::Instrumentation::kMethodEntered,
-            /*event_method*/ method,
-            /*event_field*/ nullptr,
-            /*with_object*/ true);
+            /*event_method=*/ method,
+            /*event_field=*/ nullptr,
+            /*with_object=*/ true);
 }
 
 TEST_F(InstrumentationTest, MethodExitObjectEvent) {
@@ -529,9 +529,9 @@
   ASSERT_TRUE(method->IsDirect());
   ASSERT_TRUE(method->GetDeclaringClass() == klass);
   TestEvent(instrumentation::Instrumentation::kMethodExited,
-            /*event_method*/ method,
-            /*event_field*/ nullptr,
-            /*with_object*/ true);
+            /*event_method=*/ method,
+            /*event_field=*/ nullptr,
+            /*with_object=*/ true);
 }
 
 TEST_F(InstrumentationTest, MethodExitPrimEvent) {
@@ -548,9 +548,9 @@
   ASSERT_TRUE(method->IsDirect());
   ASSERT_TRUE(method->GetDeclaringClass() == klass);
   TestEvent(instrumentation::Instrumentation::kMethodExited,
-            /*event_method*/ method,
-            /*event_field*/ nullptr,
-            /*with_object*/ false);
+            /*event_method=*/ method,
+            /*event_field=*/ nullptr,
+            /*with_object=*/ false);
 }
 
 TEST_F(InstrumentationTest, MethodUnwindEvent) {
@@ -582,9 +582,9 @@
   ASSERT_TRUE(field != nullptr);
 
   TestEvent(instrumentation::Instrumentation::kFieldWritten,
-            /*event_method*/ nullptr,
-            /*event_field*/ field,
-            /*with_object*/ true);
+            /*event_method=*/ nullptr,
+            /*event_field=*/ field,
+            /*with_object=*/ true);
 }
 
 TEST_F(InstrumentationTest, FieldWritePrimEvent) {
@@ -600,9 +600,9 @@
   ASSERT_TRUE(field != nullptr);
 
   TestEvent(instrumentation::Instrumentation::kFieldWritten,
-            /*event_method*/ nullptr,
-            /*event_field*/ field,
-            /*with_object*/ false);
+            /*event_method=*/ nullptr,
+            /*event_field=*/ field,
+            /*with_object=*/ false);
 }
 
 TEST_F(InstrumentationTest, ExceptionHandledEvent) {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 2ae95dc..b37a278 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -321,7 +321,7 @@
       } else {
         while (true) {
           // Mterp does not support all instrumentation/debugging.
-          if (MterpShouldSwitchInterpreters() != 0) {
+          if (!self->UseMterp()) {
             return ExecuteSwitchImpl<false, false>(self, accessor, shadow_frame, result_register,
                                                    false);
           }
@@ -587,8 +587,8 @@
                       accessor,
                       *shadow_frame,
                       value,
-                      /* stay_in_interpreter */ true,
-                      /* from_deoptimize */ true);
+                      /* stay_in_interpreter= */ true,
+                      /* from_deoptimize= */ true);
     }
     ShadowFrame* old_frame = shadow_frame;
     shadow_frame = shadow_frame->GetLink();
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index b170232..2cee813 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -714,12 +714,12 @@
   if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) {
     static const bool kIsRange = false;
     return DoMethodHandleInvokeCommon<kIsRange>(
-        self, shadow_frame, true /* is_exact */, inst, inst_data, result);
+        self, shadow_frame, /* invoke_exact= */ true, inst, inst_data, result);
   } else {
     DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_POLYMORPHIC_RANGE);
     static const bool kIsRange = true;
     return DoMethodHandleInvokeCommon<kIsRange>(
-        self, shadow_frame, true /* is_exact */, inst, inst_data, result);
+        self, shadow_frame, /* invoke_exact= */ true, inst, inst_data, result);
   }
 }
 
@@ -731,12 +731,12 @@
   if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) {
     static const bool kIsRange = false;
     return DoMethodHandleInvokeCommon<kIsRange>(
-        self, shadow_frame, false /* is_exact */, inst, inst_data, result);
+        self, shadow_frame, /* invoke_exact= */ false, inst, inst_data, result);
   } else {
     DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_POLYMORPHIC_RANGE);
     static const bool kIsRange = true;
     return DoMethodHandleInvokeCommon<kIsRange>(
-        self, shadow_frame, false /* is_exact */, inst, inst_data, result);
+        self, shadow_frame, /* invoke_exact= */ false, inst, inst_data, result);
   }
 }
 
@@ -1071,7 +1071,7 @@
   return true;
 
 #define COLLECT_REFERENCE_ARRAY(T, Type)                                \
-  Handle<mirror::ObjectArray<T>> array =                                \
+  Handle<mirror::ObjectArray<T>> array =                   /* NOLINT */ \
       hs.NewHandle(mirror::ObjectArray<T>::Alloc(self,                  \
                                                  array_type,            \
                                                  array_length));        \
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 26bfba9..7055e8a 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -290,7 +290,7 @@
     if (jit != nullptr) {
       jit->InvokeVirtualOrInterface(
           receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
-      jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/false);
+      jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges=*/false);
     }
     // No need to check since we've been quickened.
     return DoCall<is_range, false>(called_method, self, shadow_frame, inst, inst_data, result);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index d9f76ee..4757b57 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -167,7 +167,7 @@
 #define HOTNESS_UPDATE()                                                                       \
   do {                                                                                         \
     if (jit != nullptr) {                                                                      \
-      jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/ true);             \
+      jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges=*/ true);            \
     }                                                                                          \
   } while (false)
 
@@ -1754,7 +1754,7 @@
       case Instruction::INVOKE_POLYMORPHIC: {
         PREAMBLE();
         DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
-        bool success = DoInvokePolymorphic<false /* is_range */>(
+        bool success = DoInvokePolymorphic</* is_range= */ false>(
             self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(!success);
         break;
@@ -1762,7 +1762,7 @@
       case Instruction::INVOKE_POLYMORPHIC_RANGE: {
         PREAMBLE();
         DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
-        bool success = DoInvokePolymorphic<true /* is_range */>(
+        bool success = DoInvokePolymorphic</* is_range= */ true>(
             self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(!success);
         break;
@@ -1770,7 +1770,7 @@
       case Instruction::INVOKE_CUSTOM: {
         PREAMBLE();
         DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
-        bool success = DoInvokeCustom<false /* is_range */>(
+        bool success = DoInvokeCustom</* is_range= */ false>(
             self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
         break;
@@ -1778,7 +1778,7 @@
       case Instruction::INVOKE_CUSTOM_RANGE: {
         PREAMBLE();
         DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
-        bool success = DoInvokeCustom<true /* is_range */>(
+        bool success = DoInvokeCustom</* is_range= */ true>(
             self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
         break;
diff --git a/runtime/interpreter/mterp/arm/invoke.S b/runtime/interpreter/mterp/arm/invoke.S
index 8693d3b..08fd1bb 100644
--- a/runtime/interpreter/mterp/arm/invoke.S
+++ b/runtime/interpreter/mterp/arm/invoke.S
@@ -14,9 +14,9 @@
     cmp     r0, #0
     beq     MterpException
     FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
+    ldr     r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
     cmp     r0, #0
-    bne     MterpFallback
+    beq     MterpFallback
     GET_INST_OPCODE ip
     GOTO_OPCODE ip
 
@@ -37,9 +37,9 @@
     cmp     r0, #0
     beq     MterpException
     FETCH_ADVANCE_INST 4
-    bl      MterpShouldSwitchInterpreters
+    ldr     r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
     cmp     r0, #0
-    bne     MterpFallback
+    beq     MterpFallback
     GET_INST_OPCODE ip
     GOTO_OPCODE ip
 
diff --git a/runtime/interpreter/mterp/arm/main.S b/runtime/interpreter/mterp/arm/main.S
index f5fdf14..a9cffe7 100644
--- a/runtime/interpreter/mterp/arm/main.S
+++ b/runtime/interpreter/mterp/arm/main.S
@@ -531,9 +531,9 @@
     ldr     rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
     add     rPC, r0, r1, lsl #1                     @ generate new dex_pc_ptr
     /* Do we need to switch interpreters? */
-    bl      MterpShouldSwitchInterpreters
+    ldr     r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
     cmp     r0, #0
-    bne     MterpFallback
+    beq     MterpFallback
     /* resume execution at catch block */
     EXPORT_PC
     FETCH_INST
diff --git a/runtime/interpreter/mterp/arm64/invoke.S b/runtime/interpreter/mterp/arm64/invoke.S
index 03ac316..4844213 100644
--- a/runtime/interpreter/mterp/arm64/invoke.S
+++ b/runtime/interpreter/mterp/arm64/invoke.S
@@ -13,8 +13,8 @@
     bl      $helper
     cbz     w0, MterpException
     FETCH_ADVANCE_INST 3
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
+    ldr     w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+    cbz     w0, MterpFallback
     GET_INST_OPCODE ip
     GOTO_OPCODE ip
 
@@ -34,8 +34,8 @@
     bl      $helper
     cbz     w0, MterpException
     FETCH_ADVANCE_INST 4
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
+    ldr     w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+    cbz     w0, MterpFallback
     GET_INST_OPCODE ip
     GOTO_OPCODE ip
 
diff --git a/runtime/interpreter/mterp/arm64/main.S b/runtime/interpreter/mterp/arm64/main.S
index 1b72e79..858cb38 100644
--- a/runtime/interpreter/mterp/arm64/main.S
+++ b/runtime/interpreter/mterp/arm64/main.S
@@ -553,8 +553,8 @@
     ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
     add     xPC, x0, x1, lsl #1                     // generate new dex_pc_ptr
     /* Do we need to switch interpreters? */
-    bl      MterpShouldSwitchInterpreters
-    cbnz    w0, MterpFallback
+    ldr     w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
+    cbz     w0, MterpFallback
     /* resume execution at catch block */
     EXPORT_PC
     FETCH_INST
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index be985ff..4b6f430 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -142,27 +142,19 @@
   return entries[index];
 }
 
-extern "C" size_t MterpShouldSwitchInterpreters()
+bool CanUseMterp()
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const Runtime* const runtime = Runtime::Current();
-  const instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
-  return instrumentation->NonJitProfilingActive() ||
-      Dbg::IsDebuggerActive() ||
+  return
+      !Dbg::IsDebuggerActive() &&
+      !runtime->GetInstrumentation()->NonJitProfilingActive() &&
       // mterp only knows how to deal with the normal exits. It cannot handle any of the
       // non-standard force-returns.
-      // TODO We really only need to switch interpreters if a PopFrame has actually happened. We
-      // should check this here.
-      UNLIKELY(runtime->AreNonStandardExitsEnabled()) ||
+      !runtime->AreNonStandardExitsEnabled() &&
       // An async exception has been thrown. We need to go to the switch interpreter. MTerp doesn't
       // know how to deal with these so we could end up never dealing with it if we are in an
-      // infinite loop. Since this can be called in a tight loop and getting the current thread
-      // requires a TLS read we instead first check a short-circuit runtime flag that will only be
-      // set if something tries to set an async exception. This will make this function faster in
-      // the common case where no async exception has ever been sent. We don't need to worry about
-      // synchronization on the runtime flag since it is only set in a checkpoint which will either
-      // take place on the current thread or act as a synchronization point.
-      (UNLIKELY(runtime->AreAsyncExceptionsThrown()) &&
-       Thread::Current()->IsAsyncExceptionPending());
+      // infinite loop.
+      !runtime->AreAsyncExceptionsThrown();
 }
 
 
@@ -228,7 +220,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvokeCustom<false /* is_range */>(
+  return DoInvokeCustom</* is_range= */ false>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -239,7 +231,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvokePolymorphic<false /* is_range */>(
+  return DoInvokePolymorphic</* is_range= */ false>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -305,7 +297,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvokeCustom<true /* is_range */>(self, *shadow_frame, inst, inst_data, result_register);
+  return DoInvokeCustom</*is_range=*/ true>(self, *shadow_frame, inst, inst_data, result_register);
 }
 
 extern "C" size_t MterpInvokePolymorphicRange(Thread* self,
@@ -315,7 +307,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvokePolymorphic<true /* is_range */>(
+  return DoInvokePolymorphic</* is_range= */ true>(
       self, *shadow_frame, inst, inst_data, result_register);
 }
 
@@ -383,8 +375,8 @@
   ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
                                                    shadow_frame->GetMethod(),
                                                    self,
-                                                   /* can_run_clinit */ false,
-                                                   /* verify_access */ false);
+                                                   /* can_run_clinit= */ false,
+                                                   /* verify_access= */ false);
   if (UNLIKELY(c == nullptr)) {
     return true;
   }
@@ -471,8 +463,8 @@
   ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
                                                    shadow_frame->GetMethod(),
                                                    self,
-                                                   /* can_run_clinit */ false,
-                                                   /* verify_access */ false);
+                                                   /* can_run_clinit= */ false,
+                                                   /* verify_access= */ false);
   if (LIKELY(c != nullptr)) {
     if (UNLIKELY(c->IsStringClass())) {
       gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
@@ -562,6 +554,12 @@
 
 extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  // Check that we are using the right interpreter.
+  if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
+    // The flag might be in the middle of being updated on all threads. Retry with the lock held.
+    MutexLock tll_mu(self, *Locks::thread_list_lock_);
+    DCHECK_EQ(self->UseMterp(), CanUseMterp());
+  }
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   uint16_t inst_data = inst->Fetch16(0);
   if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
@@ -661,7 +659,7 @@
 extern "C" size_t MterpSuspendCheck(Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   self->AllowThreadSuspension();
-  return MterpShouldSwitchInterpreters();
+  return !self->UseMterp();
 }
 
 // Execute single field access instruction (get/put, static/instance).
@@ -684,8 +682,8 @@
   if (kIsPrimitive) {
     if (kIsRead) {
       PrimType value = UNLIKELY(is_volatile)
-          ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile*/ true>(offset)
-          : obj->GetFieldPrimitive<PrimType, /*kIsVolatile*/ false>(offset);
+          ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset)
+          : obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset);
       if (sizeof(PrimType) == sizeof(uint64_t)) {
         shadow_frame->SetVRegLong(vRegA, value);  // Set two consecutive registers.
       } else {
@@ -696,9 +694,9 @@
           ? shadow_frame->GetVRegLong(vRegA)
           : shadow_frame->GetVReg(vRegA);
       if (UNLIKELY(is_volatile)) {
-        obj->SetFieldPrimitive<PrimType, /*kIsVolatile*/ true>(offset, value);
+        obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset, value);
       } else {
-        obj->SetFieldPrimitive<PrimType, /*kIsVolatile*/ false>(offset, value);
+        obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset, value);
       }
     }
   } else {  // Object.
@@ -710,9 +708,9 @@
     } else {  // Write.
       ObjPtr<mirror::Object> value = shadow_frame->GetVRegReference(vRegA);
       if (UNLIKELY(is_volatile)) {
-        obj->SetFieldObjectVolatile</*kTransactionActive*/ false>(offset, value);
+        obj->SetFieldObjectVolatile</*kTransactionActive=*/ false>(offset, value);
       } else {
-        obj->SetFieldObject</*kTransactionActive*/ false>(offset, value);
+        obj->SetFieldObject</*kTransactionActive=*/ false>(offset, value);
       }
     }
   }
@@ -731,7 +729,7 @@
   shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(inst));
   ArtMethod* referrer = shadow_frame->GetMethod();
   uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
-  ArtField* field = FindFieldFromCode<kAccessType, /* access_checks */ false>(
+  ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>(
       field_idx, referrer, self, sizeof(PrimType));
   if (UNLIKELY(field == nullptr)) {
     DCHECK(self->IsExceptionPending());
@@ -772,7 +770,7 @@
         : tls_value;
     if (kIsDebugBuild) {
       uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
-      ArtField* field = FindFieldFromCode<kAccessType, /* access_checks */ false>(
+      ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>(
           field_idx, shadow_frame->GetMethod(), self, sizeof(PrimType));
       DCHECK_EQ(offset, field->GetOffset().SizeValue());
     }
@@ -781,7 +779,7 @@
         : MakeObjPtr(shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data)));
     if (LIKELY(obj != nullptr)) {
       MterpFieldAccess<PrimType, kAccessType>(
-          inst, inst_data, shadow_frame, obj, MemberOffset(offset), /* is_volatile */ false);
+          inst, inst_data, shadow_frame, obj, MemberOffset(offset), /* is_volatile= */ false);
       return true;
     }
   }
@@ -800,7 +798,7 @@
     if (LIKELY(field != nullptr)) {
       bool initialized = !kIsStatic || field->GetDeclaringClass()->IsInitialized();
       if (LIKELY(initialized)) {
-        DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks */ false>(
+        DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks= */ false>(
             field_idx, referrer, self, sizeof(PrimType))));
         ObjPtr<mirror::Object> obj = kIsStatic
             ? field->GetDeclaringClass().Ptr()
@@ -932,7 +930,7 @@
   jit::Jit* jit = Runtime::Current()->GetJit();
   if (jit != nullptr) {
     int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
-    jit->AddSamples(self, method, count, /*with_backedges*/ true);
+    jit->AddSamples(self, method, count, /*with_backedges=*/ true);
   }
   return MterpSetUpHotnessCountdown(method, shadow_frame, self);
 }
@@ -957,7 +955,7 @@
     osr_countdown = jit::Jit::kJitRecheckOSRThreshold;
     if (offset <= 0) {
       // Keep updating hotness in case a compilation request was dropped.  Eventually it will retry.
-      jit->AddSamples(self, method, osr_countdown, /*with_backedges*/ true);
+      jit->AddSamples(self, method, osr_countdown, /*with_backedges=*/ true);
     }
     did_osr = jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
   }
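
CanUseMterp() is the old switch predicate inverted via De Morgan's laws: instead of "fall back if any blocker is active", mterp now asks "stay only if no blocker is active", and the result is cached per thread behind Thread::UseMterp(). The equivalence, restated with hypothetical flags:

    // For any blockers a, b, c, d:
    //   should_switch = a || b || c || d
    //   can_use       = !a && !b && !c && !d  ==  !should_switch
    bool ShouldSwitchSketch(bool dbg, bool prof, bool nonstd, bool async) {
      return dbg || prof || nonstd || async;
    }
    bool CanUseMterpSketch(bool dbg, bool prof, bool nonstd, bool async) {
      return !dbg && !prof && !nonstd && !async;  // == !ShouldSwitchSketch(...)
    }
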
diff --git a/runtime/interpreter/mterp/mterp.h b/runtime/interpreter/mterp/mterp.h
index 81a53c8..af52758 100644
--- a/runtime/interpreter/mterp/mterp.h
+++ b/runtime/interpreter/mterp/mterp.h
@@ -34,12 +34,7 @@
 
 void InitMterpTls(Thread* self);
 void CheckMterpAsmConstants();
-
-// The return type should be 'bool' but our assembly stubs expect 'bool'
-// to be zero-extended to the whole register and that's broken on x86-64
-// as a 'bool' is returned in 'al' and the rest of 'rax' is garbage.
-// TODO: Fix mterp and stubs and revert this workaround. http://b/30232671
-extern "C" size_t MterpShouldSwitchInterpreters();
+bool CanUseMterp();
 
 // Poison value for TestExportPC.  If we segfault with this value, it means that a mterp
 // handler for a recent opcode failed to export the Dalvik PC prior to a possible exit from
diff --git a/runtime/interpreter/mterp/x86/invoke.S b/runtime/interpreter/mterp/x86/invoke.S
index 587c4cf..cfb9c7c 100644
--- a/runtime/interpreter/mterp/x86/invoke.S
+++ b/runtime/interpreter/mterp/x86/invoke.S
@@ -17,9 +17,10 @@
     testb   %al, %al
     jz      MterpException
     ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
+    movl    rSELF, %eax
+    movb    THREAD_USE_MTERP_OFFSET(%eax), %al
     testb   %al, %al
-    jnz     MterpFallback
+    jz      MterpFallback
     RESTORE_IBASE
     FETCH_INST
     GOTO_NEXT
@@ -43,9 +44,10 @@
     testb   %al, %al
     jz      MterpException
     ADVANCE_PC 4
-    call    SYMBOL(MterpShouldSwitchInterpreters)
+    movl    rSELF, %eax
+    movb    THREAD_USE_MTERP_OFFSET(%eax), %al
     testb   %al, %al
-    jnz     MterpFallback
+    jz      MterpFallback
     RESTORE_IBASE
     FETCH_INST
     GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86/main.S b/runtime/interpreter/mterp/x86/main.S
index 04b653e..b233f2c 100644
--- a/runtime/interpreter/mterp/x86/main.S
+++ b/runtime/interpreter/mterp/x86/main.S
@@ -560,9 +560,10 @@
     lea     (%eax, %ecx, 2), rPC
     movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
     /* Do we need to switch interpreters? */
-    call    SYMBOL(MterpShouldSwitchInterpreters)
+    movl    rSELF, %eax
+    movb    THREAD_USE_MTERP_OFFSET(%eax), %al
     testb   %al, %al
-    jnz     MterpFallback
+    jz      MterpFallback
     /* resume execution at catch block */
     REFRESH_IBASE
     FETCH_INST
diff --git a/runtime/interpreter/mterp/x86_64/invoke.S b/runtime/interpreter/mterp/x86_64/invoke.S
index 63c233c..f727915 100644
--- a/runtime/interpreter/mterp/x86_64/invoke.S
+++ b/runtime/interpreter/mterp/x86_64/invoke.S
@@ -15,9 +15,10 @@
     testb   %al, %al
     jz      MterpException
     ADVANCE_PC 3
-    call    SYMBOL(MterpShouldSwitchInterpreters)
+    movq    rSELF, %rax
+    movb    THREAD_USE_MTERP_OFFSET(%rax), %al
     testb   %al, %al
-    jnz     MterpFallback
+    jz      MterpFallback
     FETCH_INST
     GOTO_NEXT
 
@@ -38,9 +39,10 @@
     testb   %al, %al
     jz      MterpException
     ADVANCE_PC 4
-    call    SYMBOL(MterpShouldSwitchInterpreters)
+    movq    rSELF, %rax
+    movb    THREAD_USE_MTERP_OFFSET(%rax), %al
     testb   %al, %al
-    jnz     MterpFallback
+    jz      MterpFallback
     FETCH_INST
     GOTO_NEXT
 
diff --git a/runtime/interpreter/mterp/x86_64/main.S b/runtime/interpreter/mterp/x86_64/main.S
index e283bbe..75eb00c 100644
--- a/runtime/interpreter/mterp/x86_64/main.S
+++ b/runtime/interpreter/mterp/x86_64/main.S
@@ -526,9 +526,10 @@
     leaq    (%rax, %rcx, 2), rPC
     movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
     /* Do we need to switch interpreters? */
-    call    SYMBOL(MterpShouldSwitchInterpreters)
+    movq    rSELF, %rax
+    movb    THREAD_USE_MTERP_OFFSET(%rax), %al
     testb   %al, %al
-    jnz     MterpFallback
+    jz      MterpFallback
     /* resume execution at catch block */
     REFRESH_IBASE
     FETCH_INST
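
All four ports replace the out-of-line call with a single load of the per-thread flag at THREAD_USE_MTERP_OFFSET, and every branch polarity flips (bne/cbnz/jnz become beq/cbz/jz) because the flag's sense is inverted from "should switch" to "use mterp". Hard-coding a field offset in assembly only works if C++ pins it, the kind of invariant CheckMterpAsmConstants() exists to assert; a minimal equivalent with a stub thread layout (field names and offsets here are illustrative):

    #include <cstddef>
    #include <cstdint>

    struct ThreadLayoutSketch {
      uint32_t state_and_flags;  // Placeholder field preceding the flag.
      bool use_mterp;            // The byte read by the mterp fast path.
    };

    constexpr size_t kThreadUseMterpOffsetSketch = offsetof(ThreadLayoutSketch, use_mterp);
    static_assert(kThreadUseMterpOffsetSketch == 4, "asm constant out of sync with C++ layout");
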
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 38ecc5a..07afba4 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -865,10 +865,10 @@
       // checking version, however, does.
       if (Runtime::Current()->IsActiveTransaction()) {
         dst->AssignableCheckingMemcpy<true>(
-            dst_pos, src, src_pos, length, true /* throw_exception */);
+            dst_pos, src, src_pos, length, /* throw_exception= */ true);
       } else {
         dst->AssignableCheckingMemcpy<false>(
-                    dst_pos, src, src_pos, length, true /* throw_exception */);
+            dst_pos, src, src_pos, length, /* throw_exception= */ true);
       }
     }
   } else if (src_type->IsPrimitiveByte()) {
@@ -1478,9 +1478,9 @@
             reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset));
     ReadBarrier::Barrier<
         mirror::Object,
-        /* kIsVolatile */ false,
+        /* kIsVolatile= */ false,
         kWithReadBarrier,
-        /* kAlwaysUpdateField */ true>(
+        /* kAlwaysUpdateField= */ true>(
         obj,
         MemberOffset(offset),
         field_addr);
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index bd2705d..3fafc31 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -695,7 +695,7 @@
       {  ld2,  ld2 }
   };
 
-  TestCeilFloor(true /* ceil */, self, tmp.get(), test_pairs, arraysize(test_pairs));
+  TestCeilFloor(/* ceil= */ true, self, tmp.get(), test_pairs, arraysize(test_pairs));
 }
 
 TEST_F(UnstartedRuntimeTest, Floor) {
@@ -722,7 +722,7 @@
       {  ld2,  ld2 }
   };
 
-  TestCeilFloor(false /* floor */, self, tmp.get(), test_pairs, arraysize(test_pairs));
+  TestCeilFloor(/* ceil= */ false, self, tmp.get(), test_pairs, arraysize(test_pairs));
 }
 
 TEST_F(UnstartedRuntimeTest, ToLowerUpper) {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index c1f69b8..ef893ee 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -599,12 +599,12 @@
   void Run(Thread* self) override {
     ScopedObjectAccess soa(self);
     if (kind_ == kCompile) {
-      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false);
+      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ false);
     } else if (kind_ == kCompileOsr) {
-      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ true);
+      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ true);
     } else {
       DCHECK(kind_ == kAllocateProfile);
-      if (ProfilingInfo::Create(self, method_, /* retry_allocation */ true)) {
+      if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
         VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
       }
     }
@@ -673,7 +673,7 @@
   if (LIKELY(!method->IsNative()) && starting_count < WarmMethodThreshold()) {
     if ((new_count >= WarmMethodThreshold()) &&
         (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
-      bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
+      bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
       if (success) {
         VLOG(jit) << "Start profiling " << method->PrettyMethod();
       }
@@ -741,7 +741,7 @@
     if (np_method->IsCompilable()) {
       if (!np_method->IsNative()) {
         // The compiler requires a ProfilingInfo object for non-native methods.
-        ProfilingInfo::Create(thread, np_method, /* retry_allocation */ true);
+        ProfilingInfo::Create(thread, np_method, /* retry_allocation= */ true);
       }
       JitCompileTask compile_task(method, JitCompileTask::kCompile);
       // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
@@ -761,7 +761,7 @@
     Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
         method, profiling_info->GetSavedEntryPoint());
   } else {
-    AddSamples(thread, method, 1, /* with_backedges */false);
+    AddSamples(thread, method, 1, /* with_backedges= */false);
   }
 }
 
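The AddSamples calls above, with with_backedges distinguishing loop back-edges from plain invocations, drive a counter-based tiering scheme: warm methods get a ProfilingInfo, hot ones get a compile task. A toy model (thresholds and field names are hypothetical):

    #include <cstdint>

    struct MethodHotnessSketch {
      uint16_t count = 0;
      bool warm = false;
      bool hot = false;

      void AddSamples(uint16_t n, uint16_t warm_at, uint16_t hot_at) {
        count += n;
        if (!warm && count >= warm_at) warm = true;  // cf. ProfilingInfo::Create.
        if (!hot && count >= hot_at) hot = true;     // cf. JitCompileTask(kCompile).
      }
    };
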
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 63cb6a4..8600b41 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -221,7 +221,7 @@
   unique_fd mem_fd;
 
   // Bionic supports memfd_create, but the call may fail on older kernels.
-  mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags */ 0));
+  mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
   if (mem_fd.get() < 0) {
     VLOG(jit) << "Failed to initialize dual view JIT. memfd_create() error: "
               << strerror(errno);
@@ -281,8 +281,8 @@
         kProtRW,
         base_flags,
         mem_fd,
-        /* start */ 0,
-        /* low_4gb */ true,
+        /* start= */ 0,
+        /* low_4gb= */ true,
         "data-code-cache",
         &error_str);
   } else {
@@ -303,12 +303,12 @@
     base_flags = MAP_PRIVATE | MAP_ANON;
     data_pages = MemMap::MapAnonymous(
         "data-code-cache",
-        /* addr */ nullptr,
+        /* addr= */ nullptr,
         data_capacity + exec_capacity,
         kProtRW,
-        /* low_4gb */ true,
-        /* reuse */ false,
-        /* reservation */ nullptr,
+        /* low_4gb= */ true,
+        /* reuse= */ false,
+        /* reservation= */ nullptr,
         &error_str);
   }
 
@@ -347,8 +347,8 @@
                                        kProtR,
                                        base_flags,
                                        mem_fd,
-                                       /* start */ data_capacity,
-                                       /* low_4GB */ false,
+                                       /* start= */ data_capacity,
+                                       /* low_4GB= */ false,
                                        "jit-code-cache-rw",
                                        &error_str);
       if (!non_exec_pages.IsValid()) {
@@ -1008,7 +1008,7 @@
         // Simply discard the compiled code. Clear the counter so that it may be recompiled later.
         // Hopefully the class hierarchy will be more stable when compilation is retried.
         single_impl_still_valid = false;
-        ClearMethodCounter(method, /*was_warm*/ false);
+        ClearMethodCounter(method, /*was_warm=*/ false);
         break;
       }
     }
@@ -1156,7 +1156,7 @@
 // method. The compiled code for the method (if there is any) must not be in any thread's call stack.
 void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
   MutexLock mu(Thread::Current(), lock_);
-  RemoveMethodLocked(method, /* release_memory */ true);
+  RemoveMethodLocked(method, /* release_memory= */ true);
 }
 
 // This invalidates old_method. Once this function returns one can no longer use old_method to
@@ -1314,7 +1314,7 @@
         // its stack frame, it is not the method owning return_pc_. We just pass null to
         // LookupMethodHeader: the method is only checked against in debug builds.
         OatQuickMethodHeader* method_header =
-            code_cache_->LookupMethodHeader(frame.return_pc_, /* method */ nullptr);
+            code_cache_->LookupMethodHeader(frame.return_pc_, /* method= */ nullptr);
         if (method_header != nullptr) {
           const void* code = method_header->GetCode();
           CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
@@ -1438,7 +1438,7 @@
               << PrettySize(CodeCacheSize())
               << ", data=" << PrettySize(DataCacheSize());
 
-    DoCollection(self, /* collect_profiling_info */ do_full_collection);
+    DoCollection(self, /* collect_profiling_info= */ do_full_collection);
 
     VLOG(jit) << "After code cache collection, code="
               << PrettySize(CodeCacheSize())
@@ -1551,7 +1551,7 @@
           info->SetSavedEntryPoint(nullptr);
           // We are going to move this method back to interpreter. Clear the counter now to
           // give it a chance to be hot again.
-          ClearMethodCounter(info->GetMethod(), /*was_warm*/ true);
+          ClearMethodCounter(info->GetMethod(), /*was_warm=*/ true);
         }
       }
     } else if (kIsDebugBuild) {
@@ -1933,7 +1933,7 @@
       VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
       // Because the counter is not atomic, there are some rare cases where we may not hit the
       // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
-      ClearMethodCounter(method, /*was_warm*/ false);
+      ClearMethodCounter(method, /*was_warm=*/ false);
       return false;
     }
 
@@ -2009,7 +2009,7 @@
     // and clear the counter to get the method Jitted again.
     Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
         method, GetQuickToInterpreterBridge());
-    ClearMethodCounter(method, /*was_warm*/ profiling_info != nullptr);
+    ClearMethodCounter(method, /*was_warm=*/ profiling_info != nullptr);
   } else {
     MutexLock mu(Thread::Current(), lock_);
     auto it = osr_code_map_.find(method);
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 9043f26..e3248ea 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -129,7 +129,7 @@
     }
     total_ms_of_sleep_ += options_.GetSaveResolvedClassesDelayMs();
   }
-  FetchAndCacheResolvedClassesAndMethods(/*startup*/ true);
+  FetchAndCacheResolvedClassesAndMethods(/*startup=*/ true);
 
 
   // When we save without waiting for JIT notifications we use a simple
@@ -183,7 +183,7 @@
 
     uint16_t number_of_new_methods = 0;
     uint64_t start_work = NanoTime();
-    bool profile_saved_to_disk = ProcessProfilingInfo(/*force_save*/false, &number_of_new_methods);
+    bool profile_saved_to_disk = ProcessProfilingInfo(/*force_save=*/false, &number_of_new_methods);
     // Update the notification counter based on result. Note that there might be contention on this
     // but we don't care about to be 100% precise.
     if (!profile_saved_to_disk) {
@@ -501,7 +501,7 @@
 
   // We only need to do this once, not once per dex location.
   // TODO: Figure out a way to only do it when stuff has changed? It takes 30-50ms.
-  FetchAndCacheResolvedClassesAndMethods(/*startup*/ false);
+  FetchAndCacheResolvedClassesAndMethods(/*startup=*/ false);
 
   for (const auto& it : tracked_locations) {
     if (!force_save && ShuttingDown(Thread::Current())) {
@@ -521,7 +521,7 @@
     }
     {
       ProfileCompilationInfo info(Runtime::Current()->GetArenaPool());
-      if (!info.Load(filename, /*clear_if_invalid*/ true)) {
+      if (!info.Load(filename, /*clear_if_invalid=*/ true)) {
         LOG(WARNING) << "Could not forcefully load profile " << filename;
         continue;
       }
@@ -607,9 +607,9 @@
   Runtime* runtime = Runtime::Current();
 
   bool attached = runtime->AttachCurrentThread("Profile Saver",
-                                               /*as_daemon*/true,
+                                               /*as_daemon=*/true,
                                                runtime->GetSystemThreadGroup(),
-                                               /*create_peer*/true);
+                                               /*create_peer=*/true);
   if (!attached) {
     CHECK(runtime->IsShuttingDown(Thread::Current()));
     return nullptr;
@@ -751,7 +751,7 @@
 
   // Force save everything before destroying the thread since we want profiler_pthread_ to remain
   // valid.
-  instance_->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr);
+  instance_->ProcessProfilingInfo(/*force_save=*/true, /*number_of_new_methods=*/nullptr);
 
   // Wait for the saver thread to stop.
   CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profile saver thread shutdown");
@@ -838,7 +838,7 @@
   // but we only use this in testing when we know this won't happen.
   // Refactor the way we handle the instance so that we don't end up in this situation.
   if (saver != nullptr) {
-    saver->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr);
+    saver->ProcessProfilingInfo(/*force_save=*/true, /*number_of_new_methods=*/nullptr);
   }
 }
 
@@ -846,7 +846,7 @@
   MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
   if (instance_ != nullptr) {
     ProfileCompilationInfo info(Runtime::Current()->GetArenaPool());
-    if (!info.Load(profile, /*clear_if_invalid*/false)) {
+    if (!info.Load(profile, /*clear_if_invalid=*/false)) {
       return false;
     }
     ProfileCompilationInfo::MethodHotness hotness = info.GetMethodHotness(ref);
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index a3dae83..f6139bb 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -125,7 +125,7 @@
   }
 
   bool IsInUseByCompiler() const {
-    return IsMethodBeingCompiled(/*osr*/ true) || IsMethodBeingCompiled(/*osr*/ false) ||
+    return IsMethodBeingCompiled(/*osr=*/ true) || IsMethodBeingCompiled(/*osr=*/ false) ||
         (current_inline_uses_ > 0);
   }
 
diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc
index 6f61f5e..48f9981 100644
--- a/runtime/jni/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -286,7 +286,7 @@
     // to get reasonable stacks and environment, rather than relying on
     // tombstoned.
     JNIEnv* env;
-    Runtime::Current()->GetJavaVM()->AttachCurrentThread(&env, /* thread_args */ nullptr);
+    Runtime::Current()->GetJavaVM()->AttachCurrentThread(&env, /* thr_args= */ nullptr);
 
     std::string tmp = android::base::StringPrintf(
         "a thread (tid %" PRId64 " is making JNI calls without being attached",
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index 5200607..52509fd 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -82,7 +82,7 @@
 static constexpr bool kWarnJniAbort = false;
 
 static bool IsCallerTrusted(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
-  return hiddenapi::IsCallerTrusted(GetCallingClass(self, /* num_frames */ 1));
+  return hiddenapi::IsCallerTrusted(GetCallingClass(self, /* num_frames= */ 1));
 }
 
 template<typename T>
@@ -106,9 +106,9 @@
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
     Thread* self = Thread::Current();
-    ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
-                                                   /*check_suspended*/ true,
-                                                   /*abort_on_error*/ false);
+    ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr,
+                                                   /*check_suspended=*/ true,
+                                                   /*abort_on_error=*/ false);
 
     if (cur_method == nullptr) {
       // Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
@@ -133,9 +133,9 @@
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
     Thread* self = Thread::Current();
-    ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
-                                                   /*check_suspended*/ true,
-                                                   /*abort_on_error*/ false);
+    ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr,
+                                                   /*check_suspended=*/ true,
+                                                   /*abort_on_error=*/ false);
 
     if (cur_method == nullptr) {
       // Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
@@ -157,9 +157,9 @@
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
     Thread* self = Thread::Current();
-    ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr,
-                                                   /*check_suspended*/ true,
-                                                   /*abort_on_error*/ false);
+    ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr,
+                                                   /*check_suspended=*/ true,
+                                                   /*abort_on_error=*/ false);
 
     if (cur_method == nullptr) {
       // Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index 4ad4c14..57346b7 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -962,11 +962,11 @@
   // Make sure we can actually use it.
   jstring s = env_->NewStringUTF("poop");
   if (mirror::kUseStringCompression) {
-    ASSERT_EQ(mirror::String::GetFlaggedCount(4, /* compressible */ true),
+    ASSERT_EQ(mirror::String::GetFlaggedCount(4, /* compressible= */ true),
               env_->GetIntField(s, fid2));
     // Create incompressible string
     jstring s_16 = env_->NewStringUTF("\u0444\u0444");
-    ASSERT_EQ(mirror::String::GetFlaggedCount(2, /* compressible */ false),
+    ASSERT_EQ(mirror::String::GetFlaggedCount(2, /* compressible= */ false),
               env_->GetIntField(s_16, fid2));
   } else {
     ASSERT_EQ(4, env_->GetIntField(s, fid2));
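
Note: GetFlaggedCount() in the assertions above packs the compression flag into the low bit of the count word. A toy version of that encoding (the exact bit assignment here is illustrative; mirror::String defines the real one):

    #include <cstdint>
    #include <cstdio>

    // Low bit = compression flag, remaining bits = character count.
    constexpr int32_t GetFlaggedCountSketch(int32_t length, bool compressible) {
      return (length << 1) | (compressible ? 1 : 0);
    }

    constexpr int32_t LengthOf(int32_t flagged) { return flagged >> 1; }

    int main() {
      std::printf("4 compressible chars   -> count word %d\n",
                  GetFlaggedCountSketch(4, /* compressible= */ true));
      std::printf("2 incompressible chars -> count word %d\n",
                  GetFlaggedCountSketch(2, /* compressible= */ false));
      std::printf("round-trip length: %d\n",
                  LengthOf(GetFlaggedCountSketch(4, /* compressible= */ true)));
      return 0;
    }
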
@@ -1485,7 +1485,7 @@
   ASSERT_NE(weak_global, nullptr);
   env_->DeleteLocalRef(local_ref);
   // GC should clear the weak global.
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
   jobject new_global_ref = env_->NewGlobalRef(weak_global);
   EXPECT_EQ(new_global_ref, nullptr);
   jobject new_local_ref = env_->NewLocalRef(weak_global);
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 570fc48..86ad32e 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -745,7 +745,7 @@
                                        callee_type,
                                        self,
                                        shadow_frame,
-                                       method_handle /* receiver */,
+                                       /* receiver= */ method_handle,
                                        operands,
                                        result);
   } else {
@@ -1103,7 +1103,7 @@
   if (IsInvokeVarHandle(handle_kind)) {
     return DoVarHandleInvokeTranslation(self,
                                         shadow_frame,
-                                        /*invokeExact*/ false,
+                                        /*invokeExact=*/ false,
                                         method_handle,
                                         callsite_type,
                                         operands,
@@ -1155,7 +1155,7 @@
   } else if (IsInvokeVarHandle(handle_kind)) {
     return DoVarHandleInvokeTranslation(self,
                                         shadow_frame,
-                                        /*invokeExact*/ true,
+                                        /*invokeExact=*/ true,
                                         method_handle,
                                         callsite_type,
                                         operands,
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 31bc5e4..50b1b90 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -1073,8 +1073,8 @@
   T old_value = GetFieldPtrWithSize<T, kVerifyFlags>(member_offset, pointer_size);
   T new_value = visitor(old_value, address);
   if (old_value != new_value) {
-    dest->SetFieldPtrWithSize</* kTransactionActive */ false,
-                              /* kCheckTransaction */ true,
+    dest->SetFieldPtrWithSize</* kTransactionActive= */ false,
+                              /* kCheckTransaction= */ true,
                               kVerifyNone>(member_offset, new_value, pointer_size);
   }
 }
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 26dba02..6a378f0 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -83,7 +83,7 @@
     Thread* self = Thread::Current();
     if (name == nullptr) {
       // Note: ThrowNullPointerException() requires a message which we deliberately want to omit.
-      self->ThrowNewException("Ljava/lang/NullPointerException;", /* msg */ nullptr);
+      self->ThrowNewException("Ljava/lang/NullPointerException;", /* msg= */ nullptr);
     } else {
       self->ThrowNewException("Ljava/lang/ClassNotFoundException;", name->ToModifiedUtf8().c_str());
     }
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index e9e7ca8..36c5ae2 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -108,7 +108,7 @@
   EXPECT_NE(klass1->NumStaticFields(), 0u);
   for (ArtField& field : klass2->GetSFields()) {
     EXPECT_FALSE(
-        klass1->ResolvedFieldAccessTest</*throw_on_failure*/ false>(
+        klass1->ResolvedFieldAccessTest</*throw_on_failure=*/ false>(
             klass2.Get(),
             &field,
             klass1->GetDexCache(),
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index fbe002a..8ae79a8 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -80,11 +80,11 @@
 }
 
 inline mirror::Object* Object::MonitorEnter(Thread* self) {
-  return Monitor::MonitorEnter(self, this, /*trylock*/false);
+  return Monitor::MonitorEnter(self, this, /*trylock=*/false);
 }
 
 inline mirror::Object* Object::MonitorTryEnter(Thread* self) {
-  return Monitor::MonitorEnter(self, this, /*trylock*/true);
+  return Monitor::MonitorEnter(self, this, /*trylock=*/true);
 }
 
 inline bool Object::MonitorExit(Thread* self) {
@@ -738,7 +738,7 @@
 inline ObjPtr<Object> Object::ExchangeFieldObject(MemberOffset field_offset,
                                                   ObjPtr<Object> new_value) {
   VerifyTransaction<kTransactionActive, kCheckTransaction>();
-  VerifyCAS<kVerifyFlags>(new_value, /*old_value*/ nullptr);
+  VerifyCAS<kVerifyFlags>(new_value, /*old_value=*/ nullptr);
 
   uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
   uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index 8689e4d..ee84997 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -131,7 +131,7 @@
     UNREACHABLE();
   }
   DCHECK(kUseBakerReadBarrier);
-  LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile*/false>(MonitorOffset()));
+  LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile=*/false>(MonitorOffset()));
   uint32_t rb_state = lw.ReadBarrierState();
   DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
   return rb_state;
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 8fa2c6c..3752d6d 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -237,7 +237,7 @@
 
 template <bool kIsInstrumented>
 inline String* String::AllocEmptyString(Thread* self, gc::AllocatorType allocator_type) {
-  const int32_t length_with_flag = String::GetFlaggedCount(0, /* compressible */ true);
+  const int32_t length_with_flag = String::GetFlaggedCount(0, /* compressible= */ true);
   SetStringCountVisitor visitor(length_with_flag);
   return Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
 }
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 02aa1a8..0f0a378 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -289,7 +289,7 @@
     // Is this the requested frame?
     if (current_frame_number_ == wanted_frame_number_) {
       method_ = m;
-      dex_pc_ = GetDexPc(false /* abort_on_error*/);
+      dex_pc_ = GetDexPc(/* abort_on_failure=*/ false);
       return false;
     }
 
@@ -385,7 +385,7 @@
   } else {
     return false;
   }
-  AtraceMonitorLock(self, GetObject(), false /* is_wait */);
+  AtraceMonitorLock(self, GetObject(), /* is_wait= */ false);
   return true;
 }
 
@@ -777,7 +777,7 @@
   AtraceMonitorUnlock();  // For the implicit Unlock() just above. This will only end the deepest
                           // nesting, but that is enough for the visualization, and corresponds to
                           // the single Lock() we do afterwards.
-  AtraceMonitorLock(self, GetObject(), true /* is_wait */);
+  AtraceMonitorLock(self, GetObject(), /* is_wait= */ true);
 
   bool was_interrupted = false;
   bool timed_out = false;
@@ -1042,7 +1042,7 @@
         // No ordering required for preceding lockword read, since we retest.
         LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.GCState()));
         if (h_obj->CasLockWord(lock_word, thin_locked, CASMode::kWeak, std::memory_order_acquire)) {
-          AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
+          AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
           return h_obj.Get();  // Success!
         }
         continue;  // Go again.
@@ -1060,8 +1060,8 @@
             // Only this thread pays attention to the count. Thus there is no need for stronger
             // than relaxed memory ordering.
             if (!kUseReadBarrier) {
-              h_obj->SetLockWord(thin_locked, false /* volatile */);
-              AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
+              h_obj->SetLockWord(thin_locked, /* as_volatile= */ false);
+              AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
               return h_obj.Get();  // Success!
             } else {
               // Use CAS to preserve the read barrier state.
@@ -1069,7 +1069,7 @@
                                      thin_locked,
                                      CASMode::kWeak,
                                      std::memory_order_relaxed)) {
-                AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
+                AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
                 return h_obj.Get();  // Success!
               }
             }
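
Note: the AtraceMonitorLock call sites above all sit on the thin-lock fast path, where acquisition is a single compare-and-swap on the lock word. A stripped-down sketch of that step (std::atomic stands in for ART's lock word; names are illustrative):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    std::atomic<uint32_t> lock_word{0};  // 0 = unlocked, otherwise owner tid.

    bool TryThinLock(uint32_t thread_id) {
      uint32_t expected = 0;
      // Acquire ordering on success mirrors the memory_order_acquire CAS
      // above. A strong CAS keeps the sketch deterministic; the runtime uses
      // a weak CAS inside a retry loop.
      return lock_word.compare_exchange_strong(expected, thread_id,
                                               std::memory_order_acquire,
                                               std::memory_order_relaxed);
    }

    int main() {
      std::printf("tid 42: %s\n", TryThinLock(42) ? "locked" : "contended");
      std::printf("tid 43: %s\n", TryThinLock(43) ? "locked" : "contended");
      return 0;
    }
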
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index 74623da..19e1f3d 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -43,7 +43,7 @@
 
   // Emit the process name, <= 37 bytes.
   {
-    int fd = open("/proc/self/cmdline", O_RDONLY);
+    int fd = open("/proc/self/cmdline", O_RDONLY | O_CLOEXEC);
     char procName[33];
     memset(procName, 0, sizeof(procName));
     read(fd, procName, sizeof(procName) - 1);
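
Note: alongside the comment conversion, this change folds O_CLOEXEC into open() calls, matching the android-cloexec-open tidy check enabled in build/Android.bp. The hunk above also reads from the fd without checking the open() result; a defensive version of the same read, for illustration only:

    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdio>
    #include <cstring>

    int main() {
      char proc_name[33];
      std::memset(proc_name, 0, sizeof(proc_name));
      // O_CLOEXEC keeps the descriptor from leaking into child processes
      // spawned between open() and close().
      int fd = open("/proc/self/cmdline", O_RDONLY | O_CLOEXEC);
      if (fd >= 0) {
        if (read(fd, proc_name, sizeof(proc_name) - 1) < 0) {
          proc_name[0] = '\0';
        }
        close(fd);
      }
      std::printf("process: %s\n", proc_name);
      return 0;
    }
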
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 0b168f8..8610899 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -361,7 +361,7 @@
     thread_pool.AddTask(self, new TryLockTask(obj1));
     thread_pool.StartWorkers(self);
     ScopedThreadSuspension sts(self, kSuspended);
-    thread_pool.Wait(Thread::Current(), /*do_work*/false, /*may_hold_locks*/false);
+    thread_pool.Wait(Thread::Current(), /*do_work=*/false, /*may_hold_locks=*/false);
   }
   // Test that the trylock actually locks the object.
   {
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 6becd36..69f7648 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -174,10 +174,10 @@
   std::string error_message;
   size_t length = static_cast<size_t>(end - start);
   MemMap dex_mem_map = MemMap::MapAnonymous("DEX data",
-                                            /* addr */ nullptr,
+                                            /* addr= */ nullptr,
                                             length,
                                             PROT_READ | PROT_WRITE,
-                                            /* low_4gb */ false,
+                                            /* low_4gb= */ false,
                                             &error_message);
   if (!dex_mem_map.IsValid()) {
     ScopedObjectAccess soa(env);
@@ -196,8 +196,8 @@
   std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(location,
                                                                0,
                                                                std::move(dex_mem_map),
-                                                               /* verify */ true,
-                                                               /* verify_location */ true,
+                                                               /* verify= */ true,
+                                                               /* verify_checksum= */ true,
                                                                &error_message));
   if (dex_file == nullptr) {
     ScopedObjectAccess soa(env);
@@ -551,7 +551,7 @@
   }
 
   OatFileAssistant oat_file_assistant(filename.c_str(), target_instruction_set,
-                                      false /* load_executable */);
+                                      /* load_executable= */ false);
   return env->NewStringUTF(oat_file_assistant.GetStatusDump().c_str());
 }
 
@@ -774,7 +774,7 @@
 
   OatFileAssistant oat_file_assistant(filename.c_str(),
                                       target_instruction_set,
-                                      false /* load_executable */);
+                                      /* load_executable= */ false);
 
   std::unique_ptr<OatFile> best_oat_file = oat_file_assistant.GetBestOatFile();
   if (best_oat_file == nullptr) {
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 6f98a6d..24c8d14 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -23,6 +23,7 @@
 
 #include "nativehelper/jni_macros.h"
 
+#include "base/file_utils.h"
 #include "base/histogram-inl.h"
 #include "base/time_utils.h"
 #include "class_linker.h"
@@ -113,7 +114,7 @@
     return;
   }
 
-  int fd = dup(originalFd);
+  int fd = DupCloexec(originalFd);
   if (fd < 0) {
     ScopedObjectAccess soa(env);
     soa.Self()->ThrowNewExceptionF("Ljava/lang/RuntimeException;",
@@ -366,7 +367,7 @@
 
     VariableSizedHandleScope hs2(soa.Self());
     std::vector<Handle<mirror::Object>> raw_instances;
-    heap->GetInstances(hs2, h_class, includeAssignable, /* max_count */ 0, raw_instances);
+    heap->GetInstances(hs2, h_class, includeAssignable, /* max_count= */ 0, raw_instances);
     jobjectArray array = env->NewObjectArray(raw_instances.size(),
                                              WellKnownClasses::java_lang_Object,
                                              nullptr);
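
Note: DupCloexec() (from base/file_utils.h, newly included above) replaces plain dup() so the duplicate is created with the close-on-exec flag already set, per the android-cloexec-dup check. The underlying idiom is a one-liner over fcntl(2); a standalone sketch, not the ART helper itself:

    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdio>

    // Unlike dup(), F_DUPFD_CLOEXEC sets close-on-exec atomically, so there
    // is no window in which a concurrent fork()+exec() could inherit the fd.
    int DupCloexecSketch(int fd) {
      return fcntl(fd, F_DUPFD_CLOEXEC, 0);  // 0 = lowest fd number to use.
    }

    int main() {
      int fd = DupCloexecSketch(STDOUT_FILENO);
      if (fd < 0) {
        std::perror("fcntl");
        return 1;
      }
      std::printf("duplicated stdout as fd %d with O_CLOEXEC set\n", fd);
      close(fd);
      return 0;
    }
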
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 861d1db..2a3ea46 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -404,7 +404,7 @@
   const DexFile* dex_file = dex_cache->GetDexFile();
   const DexFile::FieldId& field_id = dex_file->GetFieldId(field_idx);
   ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupResolvedType(
-      field_id.class_idx_, dex_cache, /* class_loader */ nullptr);
+      field_id.class_idx_, dex_cache, /* class_loader= */ nullptr);
   if (klass == nullptr) {
     return;
   }
@@ -432,12 +432,12 @@
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
 
   ObjPtr<mirror::Class> klass = class_linker->LookupResolvedType(
-      method_id.class_idx_, dex_cache, /* class_loader */ nullptr);
+      method_id.class_idx_, dex_cache, /* class_loader= */ nullptr);
   if (klass == nullptr) {
     return;
   }
   // Call FindResolvedMethod to populate the dex cache.
-  class_linker->FindResolvedMethod(klass, dex_cache, /* class_loader */ nullptr, method_idx);
+  class_linker->FindResolvedMethod(klass, dex_cache, /* class_loader= */ nullptr, method_idx);
 }
 
 struct DexCacheStats {
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index e3932df..32733a8 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -59,7 +59,7 @@
     ThreadList* thread_list = Runtime::Current()->GetThreadList();
     bool timed_out;
     Thread* thread = thread_list->SuspendThreadByPeer(peer,
-                                                      /* request_suspension */ true,
+                                                      /* request_suspension= */ true,
                                                       SuspendReason::kInternal,
                                                       &timed_out);
     if (thread != nullptr) {
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 72dae47..f54bf87 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -152,7 +152,8 @@
     // Drop the shared mutator lock.
     ScopedThreadSuspension sts(self, art::ThreadState::kNative);
     // Get exclusive mutator lock with suspend all.
-    ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!", /*long_suspend*/false);
+    ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!",
+                             /*long_suspend=*/false);
     MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
     runtime->GetThreadList()->ForEach(DoCollectNonDebuggableCallback, &classes);
   }
@@ -399,7 +400,7 @@
         env,
         is_system_server,
         Runtime::NativeBridgeAction::kUnload,
-        /*isa*/ nullptr,
+        /*isa=*/ nullptr,
         profile_system_server);
   }
 }
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index f5039d1..6d94fa1 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -647,7 +647,7 @@
     ObjPtr<mirror::ObjectArray<mirror::Object>> empty_array =
         mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(),
                                                    annotation_array_class,
-                                                   /* length */ 0);
+                                                   /* length= */ 0);
     return soa.AddLocalReference<jobjectArray>(empty_array);
   }
   return soa.AddLocalReference<jobjectArray>(annotations::GetAnnotationsForClass(klass));
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index b7f0a7a..67ad0a4 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -147,7 +147,7 @@
   bool timed_out;
   // Take suspend thread lock to avoid races with threads trying to suspend this one.
   Thread* thread = thread_list->SuspendThreadByPeer(peer,
-                                                    /* request_suspension */ true,
+                                                    /* request_suspension= */ true,
                                                     SuspendReason::kInternal,
                                                     &timed_out);
   if (thread != nullptr) {
diff --git a/runtime/native/java_lang_invoke_MethodHandleImpl.cc b/runtime/native/java_lang_invoke_MethodHandleImpl.cc
index 1f2bf09..0b26bd7 100644
--- a/runtime/native/java_lang_invoke_MethodHandleImpl.cc
+++ b/runtime/native/java_lang_invoke_MethodHandleImpl.cc
@@ -48,7 +48,7 @@
   if (handle_kind >= mirror::MethodHandle::kFirstAccessorKind) {
     ArtField* const field = handle->GetTargetField();
     h_object.Assign(mirror::Field::CreateFromArtField<kRuntimePointerSize, false>(
-        soa.Self(), field, false /* force_resolve */));
+        soa.Self(), field, /* force_resolve= */ false));
   } else {
     ArtMethod* const method = handle->GetTargetMethod();
     if (method->IsConstructor()) {
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 4644480..e021b77 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -74,8 +74,8 @@
     mirror::HeapReference<mirror::Object>* field_addr =
         reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
             reinterpret_cast<uint8_t*>(obj.Ptr()) + static_cast<size_t>(offset));
-    ReadBarrier::Barrier<mirror::Object, /* kIsVolatile */ false, kWithReadBarrier,
-        /* kAlwaysUpdateField */ true>(
+    ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kWithReadBarrier,
+        /* kAlwaysUpdateField= */ true>(
         obj.Ptr(),
         MemberOffset(offset),
         field_addr);
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index f16c46b..7c320d8 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -269,7 +269,7 @@
                                   vdex_filename,
                                   writable,
                                   low_4gb,
-                                  /* unquicken*/ false,
+                                  /* unquicken=*/ false,
                                   error_msg);
   if (vdex_.get() == nullptr) {
     *error_msg = StringPrintf("Failed to load vdex file '%s' %s",
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index ba08e5e..4294baf 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -226,12 +226,12 @@
     // A representation of an invalid OatClass, used when an OatClass can't be found.
     // See FindOatClass().
     static OatClass Invalid() {
-      return OatClass(/* oat_file */ nullptr,
+      return OatClass(/* oat_file= */ nullptr,
                       ClassStatus::kErrorUnresolved,
                       kOatClassNoneCompiled,
-                      /* bitmap_size */ 0,
-                      /* bitmap_pointer */ nullptr,
-                      /* methods_pointer */ nullptr);
+                      /* bitmap_size= */ 0,
+                      /* bitmap_pointer= */ nullptr,
+                      /* methods_pointer= */ nullptr);
     }
 
    private:
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 754aa40..a06be4c 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -91,8 +91,8 @@
     : isa_(isa),
       load_executable_(load_executable),
       only_load_system_executable_(only_load_system_executable),
-      odex_(this, /*is_oat_location*/ false),
-      oat_(this, /*is_oat_location*/ true),
+      odex_(this, /*is_oat_location=*/ false),
+      oat_(this, /*is_oat_location=*/ true),
       zip_fd_(zip_fd) {
   CHECK(dex_location != nullptr) << "OatFileAssistant: null dex location";
 
@@ -700,9 +700,9 @@
         }
       } else {
         vdex = VdexFile::Open(vdex_filename,
-                              false /*writeable*/,
-                              false /*low_4gb*/,
-                              false /*unquicken*/,
+                              /*writable=*/ false,
+                              /*low_4gb=*/ false,
+                              /*unquicken=*/ false,
                               &error_msg);
       }
       if (vdex == nullptr) {
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 3a974df..521e419 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -182,8 +182,8 @@
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
             oat_file_assistant.GetDexOptNeeded(
                 CompilerFilter::kDefaultCompilerFilter,
-                /* downgrade */ false,
-                /* profile_changed */ false,
+                /* profile_changed= */ false,
+                /* downgrade= */ false,
                 relative_context.get()));
 }
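
Note: this hunk is more than a mechanical rename. The old call labeled the arguments (downgrade, profile_changed) while the new one labels them (profile_changed, downgrade); the values are identical, so the trailing-style comments were simply mislabeled, and nothing checked them. With the '=' form, clang-tidy's bugprone-argument-comment verifies each comment against the parameter name at that position. A toy reproduction (hypothetical function, not the ART API):

    // Toy stand-in for the signature being called above.
    static int GetDexOptNeededSketch(int filter, bool profile_changed, bool downgrade) {
      return filter + (profile_changed ? 1 : 0) + (downgrade ? 2 : 0);
    }

    int main() {
      // Accepted: comment names match the declaration order.
      int a = GetDexOptNeededSketch(0, /* profile_changed= */ false, /* downgrade= */ false);
      // With bugprone-argument-comment enabled, swapping the two comments on
      // the call above would be flagged (comment name does not match the
      // parameter name) -- exactly the mislabeling fixed in this hunk.
      return a;
    }
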
 
@@ -336,11 +336,11 @@
   GenerateOatForTest(dex_location.c_str(),
                      odex_location.c_str(),
                      CompilerFilter::kSpeed,
-                     /* with_alternate_image */ false);
+                     /* with_alternate_image= */ false);
 
-  android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY));
-  android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY));
-  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
+  android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY | O_CLOEXEC));
+  android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY | O_CLOEXEC));
+  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(),
                                       kRuntimeISA,
@@ -375,17 +375,17 @@
   GenerateOatForTest(dex_location.c_str(),
                      odex_location.c_str(),
                      CompilerFilter::kSpeed,
-                     /* with_alternate_image */ false);
+                     /* with_alternate_image= */ false);
 
-  android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY));
-  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
+  android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY | O_CLOEXEC));
+  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(),
                                       kRuntimeISA,
                                       false,
                                       false,
                                       vdex_fd.get(),
-                                      -1 /* oat_fd */,
+                                      /* oat_fd= */ -1,
                                       zip_fd.get());
   EXPECT_EQ(-OatFileAssistant::kDex2OatForBootImage,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -408,16 +408,16 @@
   GenerateOatForTest(dex_location.c_str(),
                      odex_location.c_str(),
                      CompilerFilter::kSpeed,
-                     /* with_alternate_image */ false);
+                     /* with_alternate_image= */ false);
 
-  android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY));
-  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
+  android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY | O_CLOEXEC));
+  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(),
                                       kRuntimeISA,
                                       false,
                                       false,
-                                      -1 /* vdex_fd */,
+                                      /* vdex_fd= */ -1,
                                       odex_fd.get(),
                                       zip_fd.get());
 
@@ -436,13 +436,13 @@
 
   Copy(GetDexSrc1(), dex_location);
 
-  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
+  android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
   OatFileAssistant oat_file_assistant(dex_location.c_str(),
                                       kRuntimeISA,
                                       false,
                                       false,
-                                      -1 /* vdex_fd */,
-                                      -1 /* oat_fd */,
+                                      /* vdex_fd= */ -1,
+                                      /* oat_fd= */ -1,
                                       zip_fd);
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
       oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -637,7 +637,7 @@
   // Strip the dex file.
   Copy(GetStrippedDexSrc1(), dex_location);
 
-  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, /*load_executable*/false);
+  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, /*load_executable=*/false);
 
   // Because the dex file is stripped, the odex file is considered the source
   // of truth for the dex checksums. The oat file should be considered
@@ -730,7 +730,7 @@
   Copy(GetDexSrc1(), dex_location);
   GenerateOatForTest(dex_location.c_str(),
                      CompilerFilter::kSpeed,
-                     /* with_alternate_image */ true);
+                     /* with_alternate_image= */ true);
 
   ScopedNonWritable scoped_non_writable(dex_location);
   ASSERT_TRUE(scoped_non_writable.IsSuccessful());
@@ -765,7 +765,7 @@
   Copy(GetDexSrc1(), dex_location);
   GenerateOatForTest(dex_location.c_str(),
                      CompilerFilter::kExtract,
-                     /* with_alternate_image */ true);
+                     /* with_alternate_image= */ true);
 
   ScopedNonWritable scoped_non_writable(dex_location);
   ASSERT_TRUE(scoped_non_writable.IsSuccessful());
@@ -1167,7 +1167,7 @@
     dex_files = Runtime::Current()->GetOatFileManager().OpenDexFilesFromOat(
         dex_location_.c_str(),
         Runtime::Current()->GetSystemClassLoader(),
-        /*dex_elements*/nullptr,
+        /*dex_elements=*/nullptr,
         &oat_file,
         &error_msgs);
     CHECK(!dex_files.empty()) << android::base::Join(error_msgs, '\n');
@@ -1213,7 +1213,7 @@
     tasks.push_back(std::move(task));
   }
   thread_pool.StartWorkers(self);
-  thread_pool.Wait(self, /* do_work */ true, /* may_hold_locks */ false);
+  thread_pool.Wait(self, /* do_work= */ true, /* may_hold_locks= */ false);
 
   // Verify that tasks which got an oat file got a unique one.
   std::set<const OatFile*> oat_files;
@@ -1335,8 +1335,8 @@
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
             oat_file_assistant.GetDexOptNeeded(
                   CompilerFilter::kDefaultCompilerFilter,
-                  /* downgrade */ false,
-                  /* profile_changed */ false,
+                  /* profile_changed= */ false,
+                  /* downgrade= */ false,
                   updated_context.get()));
 }
 
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index b9e9d38..7ac1ab4 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -181,7 +181,7 @@
 
  private:
   static BitVector GenerateTypeIndexes(const DexFile* dex_file) {
-    BitVector type_indexes(/*start_bits*/0, /*expandable*/true, Allocator::GetMallocAllocator());
+    BitVector type_indexes(/*start_bits=*/0, /*expandable=*/true, Allocator::GetMallocAllocator());
     for (uint16_t i = 0; i < dex_file->NumClassDefs(); ++i) {
       const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
       uint16_t type_idx = class_def.class_idx_.index_;
@@ -302,12 +302,12 @@
   std::priority_queue<DexFileAndClassPair> queue;
   for (size_t i = 0; i < dex_files_loaded.size(); ++i) {
     if (loaded_types[i].GetIterator() != loaded_types[i].GetIteratorEnd()) {
-      queue.emplace(dex_files_loaded[i], &loaded_types[i], /*from_loaded_oat*/true);
+      queue.emplace(dex_files_loaded[i], &loaded_types[i], /*from_loaded_oat=*/true);
     }
   }
   for (size_t i = 0; i < dex_files_unloaded.size(); ++i) {
     if (unloaded_types[i].GetIterator() != unloaded_types[i].GetIteratorEnd()) {
-      queue.emplace(dex_files_unloaded[i], &unloaded_types[i], /*from_loaded_oat*/false);
+      queue.emplace(dex_files_unloaded[i], &unloaded_types[i], /*from_loaded_oat=*/false);
     }
   }
 
@@ -385,8 +385,8 @@
   // the oat file without additional checks
   ClassLoaderContext::VerificationResult result = context->VerifyClassLoaderContextMatch(
       oat_file->GetClassLoaderContext(),
-      /*verify_names*/ true,
-      /*verify_checksums*/ true);
+      /*verify_names=*/ true,
+      /*verify_checksums=*/ true);
   switch (result) {
     case ClassLoaderContext::VerificationResult::kForcedToSkipChecks:
       return CheckCollisionResult::kSkippedClassLoaderContextSharedLibrary;
diff --git a/runtime/proxy_test.h b/runtime/proxy_test.h
index 411dc7a..23e536d 100644
--- a/runtime/proxy_test.h
+++ b/runtime/proxy_test.h
@@ -47,7 +47,7 @@
 
   // Builds the interfaces array.
   jobjectArray proxyClassInterfaces =
-      soa.Env()->NewObjectArray(interfaces.size(), javaLangClass, /* initialElement */ nullptr);
+      soa.Env()->NewObjectArray(interfaces.size(), javaLangClass, /* initialElement= */ nullptr);
   soa.Self()->AssertNoPendingException();
   for (size_t i = 0; i < interfaces.size(); ++i) {
     soa.Env()->SetObjectArrayElement(proxyClassInterfaces, i,
@@ -62,7 +62,7 @@
   jobjectArray proxyClassMethods = soa.Env()->NewObjectArray(
       methods_count,
       soa.AddLocalReference<jclass>(GetClassRoot<mirror::Method>()),
-      /* initialElement */ nullptr);
+      /* initialElement= */ nullptr);
   soa.Self()->AssertNoPendingException();
 
   jsize array_index = 0;
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 36a6b7f..afdfefa 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -126,7 +126,7 @@
         exception_handler_->SetHandlerDexPc(found_dex_pc);
         exception_handler_->SetHandlerQuickFramePc(
             GetCurrentOatQuickMethodHeader()->ToNativeQuickPc(
-                method, found_dex_pc, /* is_catch_handler */ true));
+                method, found_dex_pc, /* is_for_catch_handler= */ true));
         exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
         exception_handler_->SetHandlerMethodHeader(GetCurrentOatQuickMethodHeader());
         return false;  // End stack walk.
@@ -218,7 +218,10 @@
     }
 
     // Walk the stack to find catch handler.
-    CatchBlockStackVisitor visitor(self_, context_, &exception_ref, this, /*skip*/already_popped);
+    CatchBlockStackVisitor visitor(self_, context_,
+                                   &exception_ref,
+                                   this,
+                                   /*skip_frames=*/already_popped);
     visitor.WalkStack(true);
     uint32_t new_pop_count = handler_frame_depth_;
     DCHECK_GE(new_pop_count, already_popped);
@@ -606,7 +609,7 @@
               << deopt_method->PrettyMethod()
               << " due to "
               << GetDeoptimizationKindName(kind);
-    DumpFramesWithType(self_, /* details */ true);
+    DumpFramesWithType(self_, /* details= */ true);
   }
   if (Runtime::Current()->UseJitCompilation()) {
     Runtime::Current()->GetJit()->GetCodeCache()->InvalidateCompiledCodeFor(
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index bde0d11..e6cc471 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -25,7 +25,9 @@
 #include "base/casts.h"
 #include "entrypoints/quick/callee_save_frame.h"
 #include "gc_root-inl.h"
+#include "interpreter/mterp/mterp.h"
 #include "obj_ptr-inl.h"
+#include "thread_list.h"
 
 namespace art {
 
@@ -86,6 +88,15 @@
   return reinterpret_cast64<ArtMethod*>(callee_save_methods_[static_cast<size_t>(type)]);
 }
 
+template<typename Action>
+void Runtime::DoAndMaybeSwitchInterpreter(Action lambda) {
+  MutexLock tll_mu(Thread::Current(), *Locks::thread_list_lock_);
+  lambda();
+  Runtime::Current()->GetThreadList()->ForEach([](Thread* thread, void*) {
+      thread->tls32_.use_mterp.store(interpreter::CanUseMterp());
+  }, nullptr);
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_RUNTIME_INL_H_
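
Note: DoAndMaybeSwitchInterpreter() above is a small but subtle pattern: the state change and the per-thread flag refresh happen under one lock, so two concurrent calls cannot interleave a mutation with a stale broadcast. A self-contained sketch of the same shape (illustrative names only, not ART APIs):

    #include <atomic>
    #include <mutex>
    #include <vector>

    struct Worker {
      std::atomic<bool> use_fast_path{true};
    };

    std::mutex g_list_lock;           // stands in for Locks::thread_list_lock_
    std::vector<Worker*> g_workers;   // stands in for the runtime thread list
    bool g_debugger_attached = false;

    bool CanUseFastPath() { return !g_debugger_attached; }

    template <typename Action>
    void DoAndMaybeSwitch(Action action) {
      // One critical section covers both the mutation and the broadcast.
      std::lock_guard<std::mutex> lock(g_list_lock);
      action();
      for (Worker* w : g_workers) {
        w->use_fast_path.store(CanUseFastPath());
      }
    }

    int main() {
      Worker w;
      {
        std::lock_guard<std::mutex> lock(g_list_lock);
        g_workers.push_back(&w);
      }
      DoAndMaybeSwitch([] { g_debugger_attached = true; });
      return w.use_fast_path.load() ? 1 : 0;  // Expect 0: fast path now off.
    }
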
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 4d77b9d..7fa5607 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -305,15 +305,15 @@
     // Very few things are actually capable of distinguishing between the peer & peerless states so
     // this should be fine.
     bool thread_attached = AttachCurrentThread("Shutdown thread",
-                                               /* as_daemon */ false,
+                                               /* as_daemon= */ false,
                                                GetSystemThreadGroup(),
-                                               /* Create peer */ IsStarted());
+                                               /* create_peer= */ IsStarted());
     if (UNLIKELY(!thread_attached)) {
       LOG(WARNING) << "Failed to attach shutdown thread. Trying again without a peer.";
       CHECK(AttachCurrentThread("Shutdown thread (no java peer)",
-                                /* as_daemon */   false,
-                                /* thread_group*/ nullptr,
-                                /* Create peer */ false));
+                                /* as_daemon= */   false,
+                                /* thread_group=*/ nullptr,
+                                /* create_peer= */ false));
     }
     self = Thread::Current();
   } else {
@@ -614,7 +614,7 @@
                            bool ignore_unrecognized,
                            RuntimeArgumentMap* runtime_options) {
   Locks::Init();
-  InitLogging(/* argv */ nullptr, Abort);  // Calls Locks::Init() as a side effect.
+  InitLogging(/* argv= */ nullptr, Abort);  // Calls Locks::Init() as a side effect.
   bool parsed = ParsedOptions::Parse(raw_options, ignore_unrecognized, runtime_options);
   if (!parsed) {
     LOG(ERROR) << "Failed to parse options";
@@ -815,7 +815,7 @@
         ? NativeBridgeAction::kInitialize
         : NativeBridgeAction::kUnload;
     InitNonZygoteOrPostFork(self->GetJniEnv(),
-                            /* is_system_server */ false,
+                            /* is_system_server= */ false,
                             action,
                             GetInstructionSetString(kRuntimeISA));
   }
@@ -1002,9 +1002,9 @@
     std::string error_msg;
 
     std::unique_ptr<VdexFile> vdex_file(VdexFile::Open(vdex_filename,
-                                                       false /* writable */,
-                                                       false /* low_4gb */,
-                                                       false, /* unquicken */
+                                                       /* writable= */ false,
+                                                       /* low_4gb= */ false,
+                                                       /* unquicken= */ false,
                                                        &error_msg));
     if (vdex_file.get() == nullptr) {
       return false;
@@ -1015,15 +1015,15 @@
       return false;
     }
     std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.get(),
-                                                    false /* writable */,
-                                                    false /* program_header_only */,
-                                                    false /* low_4gb */,
+                                                    /* writable= */ false,
+                                                    /* program_header_only= */ false,
+                                                    /* low_4gb= */ false,
                                                     &error_msg));
     if (elf_file.get() == nullptr) {
       return false;
     }
     std::unique_ptr<const OatFile> oat_file(
-        OatFile::OpenWithElfFile(/* zip_fd */ -1,
+        OatFile::OpenWithElfFile(/* zip_fd= */ -1,
                                  elf_file.release(),
                                  vdex_file.release(),
                                  oat_location,
@@ -1117,7 +1117,7 @@
   CHECK(klass != nullptr);
   gc::AllocatorType allocator_type = runtime->GetHeap()->GetCurrentAllocator();
   ObjPtr<mirror::Throwable> exception_object = ObjPtr<mirror::Throwable>::DownCast(
-      klass->Alloc</* kIsInstrumented */ true>(self, allocator_type));
+      klass->Alloc</* kIsInstrumented= */ true>(self, allocator_type));
   CHECK(exception_object != nullptr);
   *exception = GcRoot<mirror::Throwable>(exception_object);
   // Initialize the "detailMessage" field.
@@ -1127,7 +1127,7 @@
   ArtField* detailMessageField =
       throwable->FindDeclaredInstanceField("detailMessage", "Ljava/lang/String;");
   CHECK(detailMessageField != nullptr);
-  detailMessageField->SetObject</* kTransactionActive */ false>(exception->Read(), message);
+  detailMessageField->SetObject</* kTransactionActive= */ false>(exception->Read(), message);
 }
 
 bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
@@ -1160,8 +1160,8 @@
                                                  reinterpret_cast<uint8_t*>(kSentinelAddr),
                                                  kPageSize,
                                                  PROT_NONE,
-                                                 /* low_4g */ true,
-                                                 /* error_msg */ nullptr);
+                                                 /* low_4gb= */ true,
+                                                 /* error_msg= */ nullptr);
     if (!protected_fault_page_.IsValid()) {
       LOG(WARNING) << "Could not reserve sentinel fault page";
     } else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != kSentinelAddr) {
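
Note: the hunk above reserves a PROT_NONE page at a fixed sentinel address so that dereferences of that address fault deterministically. The shape of the reservation, as a standalone sketch (address and size are illustrative):

    #include <sys/mman.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // A page-aligned address in the low 4GB; the real kSentinelAddr differs.
      void* hint = reinterpret_cast<void*>(uintptr_t{0x10000000});
      // PROT_NONE: any load/store through this page faults immediately.
      // Without MAP_FIXED the kernel may place the page elsewhere, which is
      // why the code above re-checks Begin() against kSentinelAddr and only
      // warns on a mismatch.
      void* page = mmap(hint, 4096, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED) {
        std::perror("mmap");
        return 1;
      }
      std::printf("sentinel page reserved at %p (hint %p)\n", page, hint);
      munmap(page, 4096);
      return 0;
    }
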
@@ -1371,13 +1371,13 @@
     arena_pool_.reset(new MallocArenaPool());
     jit_arena_pool_.reset(new MallocArenaPool());
   } else {
-    arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ false));
-    jit_arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ false, "CompilerMetadata"));
+    arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ false));
+    jit_arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ false, "CompilerMetadata"));
   }
 
   if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
     // 4gb, no malloc. Explanation in header.
-    low_4gb_arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ true));
+    low_4gb_arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ true));
   }
   linear_alloc_.reset(CreateLinearAlloc());
 
@@ -2148,7 +2148,7 @@
     method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
   }
   // Create empty conflict table.
-  method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count*/0u, linear_alloc),
+  method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count=*/0u, linear_alloc),
                               pointer_size);
   return method;
 }
@@ -2280,7 +2280,7 @@
     LOG(WARNING) << "JIT profile information will not be recorded: profile filename is empty.";
     return;
   }
-  if (!OS::FileExists(profile_output_filename.c_str(), false /*check_file_type*/)) {
+  if (!OS::FileExists(profile_output_filename.c_str(), /*check_file_type=*/ false)) {
     LOG(WARNING) << "JIT profile information will not be recorded: profile file does not exits.";
     return;
   }
@@ -2519,12 +2519,12 @@
   const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize();
   if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) {
     imt_unimplemented_method_->SetImtConflictTable(
-        ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
+        ClassLinker::CreateImtConflictTable(/*count=*/0u, GetLinearAlloc(), pointer_size),
         pointer_size);
   }
   if (imt_conflict_method_->GetImtConflictTable(pointer_size) == nullptr) {
     imt_conflict_method_->SetImtConflictTable(
-          ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
+          ClassLinker::CreateImtConflictTable(/*count=*/0u, GetLinearAlloc(), pointer_size),
           pointer_size);
   }
 }
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 398a48d..e27c87d 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -659,7 +659,7 @@
   }
 
   void SetNonStandardExitsEnabled() {
-    non_standard_exits_enabled_ = true;
+    DoAndMaybeSwitchInterpreter([=](){ non_standard_exits_enabled_ = true; });
   }
 
   bool AreAsyncExceptionsThrown() const {
@@ -667,9 +667,20 @@
   }
 
   void SetAsyncExceptionsThrown() {
-    async_exceptions_thrown_ = true;
+    DoAndMaybeSwitchInterpreter([=](){ async_exceptions_thrown_ = true; });
   }
 
+  // Change state and re-check which interpreter should be used.
+  //
+  // This must be called whenever there is an event that forces
+  // us to use a different interpreter (e.g. the debugger is attached).
+  //
+  // Changing the state via the lambda gives us some multithreading safety.
+  // It ensures that two calls do not interfere with each other and
+  // it makes it possible to DCHECK that the thread-local flag is correct.
+  template<typename Action>
+  static void DoAndMaybeSwitchInterpreter(Action lambda);
+
   // Returns the build fingerprint, if set. Otherwise an empty string is returned.
   std::string GetFingerprint() {
     return fingerprint_;
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index 4bd3b3a..55ba293 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -30,8 +30,8 @@
   HandleUnexpectedSignalCommon(signal_number,
                                info,
                                raw_context,
-                               /* handle_timeout_signal */ false,
-                               /* dump_on_stderr */ false);
+                               /* handle_timeout_signal= */ false,
+                               /* dump_on_stderr= */ false);
 
   // Run the old signal handler.
   old_action.sa_sigaction(signal_number, info, raw_context);
@@ -44,7 +44,7 @@
   if (android_root != nullptr && strcmp(android_root, "/system") != 0) {
     InitPlatformSignalHandlersCommon(HandleUnexpectedSignalAndroid,
                                      &old_action,
-                                     /* handle_timeout_signal */ false);
+                                     /* handle_timeout_signal= */ false);
   }
 }
 
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 89f3124..20b3327 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -191,10 +191,10 @@
 TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttach) {
   std::string error_msg;
   MemMap stack = MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
-                                      /* addr */ nullptr,
+                                      /* addr= */ nullptr,
                                       128 * kPageSize,  // Just some small stack.
                                       PROT_READ | PROT_WRITE,
-                                      /* low_4gb */ false,
+                                      /* low_4gb= */ false,
                                       &error_msg);
   ASSERT_TRUE(stack.IsValid()) << error_msg;
 
@@ -505,10 +505,10 @@
           self,
           // Just a random class
           soa.Decode<mirror::Class>(WellKnownClasses::java_util_Collections).Ptr(),
-          /*ms*/0,
-          /*ns*/0,
-          /*interruptShouldThrow*/false,
-          /*why*/kWaiting);
+          /*ms=*/0,
+          /*ns=*/0,
+          /*interruptShouldThrow=*/false,
+          /*why=*/kWaiting);
     }
   }
   ASSERT_TRUE(cb_.saw_wait_start_);
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 6313553..cfa8ea6 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -31,8 +31,8 @@
   HandleUnexpectedSignalCommon(signal_number,
                                info,
                                raw_context,
-                               /* handle_timeout_signal */ true,
-                               /* dump_on_stderr */ true);
+                               /* handle_timeout_signal= */ true,
+                               /* dump_on_stderr= */ true);
 
   if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) {
     pid_t tid = GetTid();
@@ -77,7 +77,7 @@
   // On the host, we don't have debuggerd to dump a stack for us when something unexpected happens.
   InitPlatformSignalHandlersCommon(HandleUnexpectedSignalLinux,
                                    nullptr,
-                                   /* handle_timeout_signal */ true);
+                                   /* handle_timeout_signal= */ true);
 }
 
 }  // namespace art
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index f4a27b8..38ea9cc 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -118,7 +118,7 @@
 
   ScopedThreadStateChange tsc(Thread::Current(), kWaitingForSignalCatcherOutput);
 
-  std::unique_ptr<File> file(new File(output_fd.release(), true /* check_usage */));
+  std::unique_ptr<File> file(new File(output_fd.release(), /* check_usage= */ true));
   bool success = file->WriteFully(s.data(), s.size());
   if (success) {
     success = file->FlushCloseOrErase() == 0;
@@ -169,7 +169,7 @@
 
 void SignalCatcher::HandleSigUsr1() {
   LOG(INFO) << "SIGUSR1 forcing GC (no HPROF) and profile save";
-  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+  Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
   ProfileSaver::ForceProcessProfiles();
 }
 
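The bulk of this change rewrites argument comments from the form "/* name */ value" to "/* name= */ value", the trailing-'=' form that clang-tidy's bugprone-argument-comment check can verify against the callee's parameter list. A minimal sketch of what the convention buys (hypothetical declaration, not from this tree):

// Hypothetical callee; only the comment convention is the point here.
void CollectGarbageSketch(bool clear_soft_references);

void Caller() {
  // clang-tidy matches the identifier before '=' against the declared
  // parameter name and warns on a mismatch, so stale comments get caught.
  CollectGarbageSketch(/* clear_soft_references= */ false);
}
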
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 5f44286..811e23b 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -358,7 +358,7 @@
   ALWAYS_INLINE DexRegisterMap GetDexRegisterMapOf(StackMap stack_map) const {
     if (stack_map.HasDexRegisterMap()) {
       DexRegisterMap map(number_of_dex_registers_, DexRegisterLocation::Invalid());
-      DecodeDexRegisterMap(stack_map.Row(), /* first_dex_register */ 0, &map);
+      DecodeDexRegisterMap(stack_map.Row(), /* first_dex_register= */ 0, &map);
       return map;
     }
     return DexRegisterMap(0, DexRegisterLocation::None());
diff --git a/runtime/subtype_check.h b/runtime/subtype_check.h
index aac547e..106c7f1 100644
--- a/runtime/subtype_check.h
+++ b/runtime/subtype_check.h
@@ -237,7 +237,7 @@
   static SubtypeCheckInfo::State EnsureInitialized(ClassPtr klass)
       REQUIRES(Locks::subtype_check_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    return InitializeOrAssign(klass, /*assign*/false).GetState();
+    return InitializeOrAssign(klass, /*assign=*/false).GetState();
   }
 
   // Force this class's SubtypeCheckInfo state into Assigned|Overflowed.
@@ -250,7 +250,7 @@
   static SubtypeCheckInfo::State EnsureAssigned(ClassPtr klass)
       REQUIRES(Locks::subtype_check_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    return InitializeOrAssign(klass, /*assign*/true).GetState();
+    return InitializeOrAssign(klass, /*assign=*/true).GetState();
   }
 
   // Resets the SubtypeCheckInfo into the Uninitialized state.
@@ -398,7 +398,7 @@
 
     // Force all ancestors to Assigned | Overflowed.
     ClassPtr parent_klass = GetParentClass(klass);
-    size_t parent_depth = InitializeOrAssign(parent_klass, /*assign*/true).GetDepth();
+    size_t parent_depth = InitializeOrAssign(parent_klass, /*assign=*/true).GetDepth();
     if (kIsDebugBuild) {
       SubtypeCheckInfo::State parent_state = GetSubtypeCheckInfo(parent_klass).GetState();
       DCHECK(parent_state == SubtypeCheckInfo::kAssigned ||
@@ -542,17 +542,17 @@
                                                    int32_t new_value)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (Runtime::Current() != nullptr && Runtime::Current()->IsActiveTransaction()) {
-      return klass->template CasField32</*kTransactionActive*/true>(offset,
-                                                                    old_value,
-                                                                    new_value,
-                                                                    CASMode::kWeak,
-                                                                    std::memory_order_seq_cst);
-    } else {
-      return klass->template CasField32</*kTransactionActive*/false>(offset,
+      return klass->template CasField32</*kTransactionActive=*/true>(offset,
                                                                      old_value,
                                                                      new_value,
                                                                      CASMode::kWeak,
                                                                      std::memory_order_seq_cst);
+    } else {
+      return klass->template CasField32</*kTransactionActive=*/false>(offset,
+                                                                      old_value,
+                                                                      new_value,
+                                                                      CASMode::kWeak,
+                                                                      std::memory_order_seq_cst);
     }
   }
 
diff --git a/runtime/subtype_check_bits.h b/runtime/subtype_check_bits.h
index 462f203..23d8ac3 100644
--- a/runtime/subtype_check_bits.h
+++ b/runtime/subtype_check_bits.h
@@ -56,9 +56,9 @@
  *
  * See subtype_check.h and subtype_check_info.h for more details.
  */
-BITSTRUCT_DEFINE_START(SubtypeCheckBits, /*size*/ BitString::BitStructSizeOf() + 1u)
-  BitStructField<BitString, /*lsb*/ 0> bitstring_;
-  BitStructUint</*lsb*/ BitString::BitStructSizeOf(), /*width*/ 1> overflow_;
+BITSTRUCT_DEFINE_START(SubtypeCheckBits, /*size=*/ BitString::BitStructSizeOf() + 1u)
+  BitStructField<BitString, /*lsb=*/ 0> bitstring_;
+  BitStructUint</*lsb=*/ BitString::BitStructSizeOf(), /*width=*/ 1> overflow_;
 BITSTRUCT_DEFINE_END(SubtypeCheckBits);
 
 }  // namespace art
diff --git a/runtime/subtype_check_bits_and_status.h b/runtime/subtype_check_bits_and_status.h
index 321a723..eec6e21 100644
--- a/runtime/subtype_check_bits_and_status.h
+++ b/runtime/subtype_check_bits_and_status.h
@@ -68,11 +68,11 @@
 static constexpr size_t kClassStatusBitSize = MinimumBitsToStore(enum_cast<>(ClassStatus::kLast));
 static_assert(kClassStatusBitSize == 4u, "ClassStatus should need 4 bits.");
 BITSTRUCT_DEFINE_START(SubtypeCheckBitsAndStatus, BitSizeOf<BitString::StorageType>())
-  BitStructField<SubtypeCheckBits, /*lsb*/ 0> subtype_check_info_;
+  BitStructField<SubtypeCheckBits, /*lsb=*/ 0> subtype_check_info_;
   BitStructField<ClassStatus,
-                 /*lsb*/ SubtypeCheckBits::BitStructSizeOf(),
-                 /*width*/ kClassStatusBitSize> status_;
-  BitStructInt</*lsb*/ 0, /*width*/ BitSizeOf<BitString::StorageType>()> int32_alias_;
+                 /*lsb=*/ SubtypeCheckBits::BitStructSizeOf(),
+                 /*width=*/ kClassStatusBitSize> status_;
+  BitStructInt</*lsb=*/ 0, /*width=*/ BitSizeOf<BitString::StorageType>()> int32_alias_;
 BITSTRUCT_DEFINE_END(SubtypeCheckBitsAndStatus);
 
 // Use the spare alignment from "ClassStatus" to store all the new SubtypeCheckInfo data.
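A layout note: taken together, the two bitstructs pack a single storage word, with SubtypeCheckBits in the low bits (a bitstring plus a one-bit overflow flag), the 4-bit ClassStatus above it, and int32_alias_ overlaying the whole word. Below is a self-contained sketch of that packing, assuming a 32-bit storage word and hence a 27-bit bitstring (32 - 4 status bits - 1 overflow bit); ART's actual BitStruct machinery is not reproduced here.

#include <cassert>
#include <cstdint>

// Layout sketch only, not ART's BitStruct implementation.
constexpr uint32_t kBitStringBits = 27;                  // assumed width
constexpr uint32_t kOverflowShift = kBitStringBits;      // bit 27
constexpr uint32_t kStatusShift = kBitStringBits + 1u;   // bits 28..31

constexpr uint32_t Pack(uint32_t bitstring, bool overflow, uint32_t status) {
  return (bitstring & ((1u << kBitStringBits) - 1u)) |
         (static_cast<uint32_t>(overflow) << kOverflowShift) |
         (status << kStatusShift);
}

int main() {
  uint32_t word = Pack(/* bitstring= */ 0b101u, /* overflow= */ true, /* status= */ 4u);
  assert((word >> kStatusShift) == 4u);                    // ClassStatus recovered
  assert(((word >> kOverflowShift) & 1u) == 1u);           // overflow flag recovered
  assert((word & ((1u << kBitStringBits) - 1u)) == 0b101u);  // bitstring recovered
  return 0;
}
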
diff --git a/runtime/subtype_check_info_test.cc b/runtime/subtype_check_info_test.cc
index 9bd135e..44a2a69 100644
--- a/runtime/subtype_check_info_test.cc
+++ b/runtime/subtype_check_info_test.cc
@@ -87,7 +87,7 @@
 struct SubtypeCheckInfoTest : public ::testing::Test {
  protected:
   void SetUp() override {
-    android::base::InitLogging(/*argv*/nullptr);
+    android::base::InitLogging(/*argv=*/nullptr);
   }
 
   void TearDown() override {
@@ -158,33 +158,33 @@
 
   // Illegal values during construction would cause a DCHECK failure and crash.

   ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({1u}),
-                                    /*next*/MakeBitStringChar(0),
-                                    /*overflow*/false,
-                                    /*depth*/0u),
+                                    /*next=*/MakeBitStringChar(0),
+                                    /*overflow=*/false,
+                                    /*depth=*/0u),
                GetExpectedMessageForDeathTest("Path was too long for the depth"));
   ASSERT_DEATH(MakeSubtypeCheckInfoInfused(MakeBitString({1u, 1u}),
-                                           /*overflow*/false,
-                                           /*depth*/0u),
+                                           /*overflow=*/false,
+                                           /*depth=*/0u),
                GetExpectedMessageForDeathTest("Bitstring too long for depth"));
   ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({1u}),
-                                    /*next*/MakeBitStringChar(0),
-                                    /*overflow*/false,
-                                    /*depth*/1u),
+                                    /*next=*/MakeBitStringChar(0),
+                                    /*overflow=*/false,
+                                    /*depth=*/1u),
                GetExpectedMessageForDeathTest("Expected \\(Assigned\\|Initialized\\) "
                                               "state to have >0 Next value"));
   ASSERT_DEATH(MakeSubtypeCheckInfoInfused(MakeBitString({0u, 2u, 1u}),
-                                           /*overflow*/false,
-                                           /*depth*/2u),
+                                           /*overflow=*/false,
+                                           /*depth=*/2u),
                GetExpectedMessageForDeathTest("Path to root had non-0s following 0s"));
   ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({0u, 2u}),
-                                    /*next*/MakeBitStringChar(1u),
-                                    /*overflow*/false,
-                                    /*depth*/2u),
+                                    /*next=*/MakeBitStringChar(1u),
+                                    /*overflow=*/false,
+                                    /*depth=*/2u),
                GetExpectedMessageForDeathTest("Path to root had non-0s following 0s"));
   ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({0u, 1u, 1u}),
-                                    /*next*/MakeBitStringChar(0),
-                                    /*overflow*/false,
-                                    /*depth*/3u),
+                                    /*next=*/MakeBitStringChar(0),
+                                    /*overflow=*/false,
+                                    /*depth=*/3u),
                GetExpectedMessageForDeathTest("Path to root had non-0s following 0s"));
 
   // These are really slow (~1sec per death test on host),
@@ -194,62 +194,62 @@
 TEST_F(SubtypeCheckInfoTest, States) {
   EXPECT_EQ(SubtypeCheckInfo::kUninitialized, MakeSubtypeCheckInfo().GetState());
   EXPECT_EQ(SubtypeCheckInfo::kInitialized,
-            MakeSubtypeCheckInfo(/*path*/{}, /*next*/MakeBitStringChar(1)).GetState());
+            MakeSubtypeCheckInfo(/*path_to_root=*/{}, /*next=*/MakeBitStringChar(1)).GetState());
   EXPECT_EQ(SubtypeCheckInfo::kOverflowed,
-            MakeSubtypeCheckInfo(/*path*/{},
-                                 /*next*/MakeBitStringChar(1),
-                                 /*overflow*/true,
-                                 /*depth*/1u).GetState());
+            MakeSubtypeCheckInfo(/*path_to_root=*/{},
+                                 /*next=*/MakeBitStringChar(1),
+                                 /*overflow=*/true,
+                                 /*depth=*/1u).GetState());
   EXPECT_EQ(SubtypeCheckInfo::kAssigned,
-            MakeSubtypeCheckInfo(/*path*/MakeBitString({1u}),
-                                 /*next*/MakeBitStringChar(1),
-                                 /*overflow*/false,
-                                 /*depth*/1u).GetState());
+            MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitString({1u}),
+                                 /*next=*/MakeBitStringChar(1),
+                                 /*overflow=*/false,
+                                 /*depth=*/1u).GetState());
 
   // Test edge conditions: depth == BitString::kCapacity (No Next value).
   EXPECT_EQ(SubtypeCheckInfo::kAssigned,
-            MakeSubtypeCheckInfo(/*path*/MakeBitStringMax(),
-                                 /*next*/MakeBitStringChar(0),
-                                 /*overflow*/false,
-                                 /*depth*/BitString::kCapacity).GetState());
+            MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(),
+                                 /*next=*/MakeBitStringChar(0),
+                                 /*overflow=*/false,
+                                 /*depth=*/BitString::kCapacity).GetState());
   EXPECT_EQ(SubtypeCheckInfo::kInitialized,
-            MakeSubtypeCheckInfo(/*path*/MakeBitStringMax<BitString::kCapacity - 1u>(),
-                                 /*next*/MakeBitStringChar(0),
-                                 /*overflow*/false,
-                                 /*depth*/BitString::kCapacity).GetState());
+            MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax<BitString::kCapacity - 1u>(),
+                                 /*next=*/MakeBitStringChar(0),
+                                 /*overflow=*/false,
+                                 /*depth=*/BitString::kCapacity).GetState());
   // Test edge conditions: depth > BitString::kCapacity (Must overflow).
   EXPECT_EQ(SubtypeCheckInfo::kOverflowed,
-            MakeSubtypeCheckInfo(/*path*/MakeBitStringMax(),
-                                 /*next*/MakeBitStringChar(0),
-                                 /*overflow*/true,
-                                 /*depth*/BitString::kCapacity + 1u).GetState());
+            MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(),
+                                 /*next=*/MakeBitStringChar(0),
+                                 /*overflow=*/true,
+                                 /*depth=*/BitString::kCapacity + 1u).GetState());
 }
 
 TEST_F(SubtypeCheckInfoTest, NextValue) {
   // Validate "Next" is correctly aliased as the Bitstring[Depth] character.
   EXPECT_EQ(MakeBitStringChar(1u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}),
-                                                           /*overflow*/false,
-                                                           /*depth*/0u).GetNext());
+                                                                 /*overflow=*/false,
+                                                                 /*depth=*/0u).GetNext());
   EXPECT_EQ(MakeBitStringChar(2u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}),
-                                                           /*overflow*/false,
-                                                           /*depth*/1u).GetNext());
+                                                                 /*overflow=*/false,
+                                                                 /*depth=*/1u).GetNext());
   EXPECT_EQ(MakeBitStringChar(3u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}),
-                                                           /*overflow*/false,
-                                                           /*depth*/2u).GetNext());
+                                                                 /*overflow=*/false,
+                                                                 /*depth=*/2u).GetNext());
   EXPECT_EQ(MakeBitStringChar(1u), MakeSubtypeCheckInfoUnchecked(MakeBitString({0u, 2u, 1u}),
-                                                           /*overflow*/false,
-                                                           /*depth*/2u).GetNext());
+                                                                 /*overflow=*/false,
+                                                                 /*depth=*/2u).GetNext());
   // Test edge conditions: depth == BitString::kCapacity (No Next value).
   EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<BitString::kCapacity>(),
-                                                     /*overflow*/false,
-                                                     /*depth*/BitString::kCapacity)));
+                                                     /*overflow=*/false,
+                                                     /*depth=*/BitString::kCapacity)));
   // Anything with depth >= BitString::kCapacity has no next value.
   EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<BitString::kCapacity>(),
-                                                     /*overflow*/false,
-                                                     /*depth*/BitString::kCapacity + 1u)));
+                                                     /*overflow=*/false,
+                                                     /*depth=*/BitString::kCapacity + 1u)));
   EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax(),
-                                                     /*overflow*/false,
-                                                     /*depth*/std::numeric_limits<size_t>::max())));
+                                                     /*overflow=*/false,
+                                                     /*depth=*/std::numeric_limits<size_t>::max())));
 }
 
 template <size_t kPos = BitString::kCapacity>
@@ -259,10 +259,10 @@
   using StorageType = BitString::StorageType;
 
   SubtypeCheckInfo sci =
-      MakeSubtypeCheckInfo(/*path_to_root*/MakeBitStringMax(),
-                           /*next*/BitStringChar{},
-                           /*overflow*/false,
-                           /*depth*/BitString::kCapacity);
+      MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(),
+                           /*next=*/BitStringChar{},
+                           /*overflow=*/false,
+                           /*depth=*/BitString::kCapacity);
   // 0b000...111 where LSB == 1, and trailing 1s = the maximum bitstring representation.
   EXPECT_EQ(MaxInt<StorageType>(LenForPos()), sci.GetEncodedPathToRoot());
 
@@ -275,8 +275,8 @@
 
   SubtypeCheckInfo sci2 =
       MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<2u>(),
-                                   /*overflow*/false,
-                                   /*depth*/BitString::kCapacity);
+                                   /*overflow=*/false,
+                                   /*depth=*/BitString::kCapacity);
 
 #define MAKE_ENCODED_PATH(pos0, pos1, pos2) \
     (((pos0) << 0) | \
@@ -290,8 +290,8 @@
 
   SubtypeCheckInfo sci3 =
       MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<2u>(),
-                                   /*overflow*/false,
-                                   /*depth*/BitString::kCapacity - 1u);
+                                   /*overflow=*/false,
+                                   /*depth=*/BitString::kCapacity - 1u);
 
   EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b1111, 0b0),
             sci3.GetEncodedPathToRoot());
@@ -300,8 +300,8 @@
 
   SubtypeCheckInfo sci4 =
       MakeSubtypeCheckInfoUnchecked(MakeBitString({0b1010101u}),
-                                   /*overflow*/false,
-                                   /*depth*/BitString::kCapacity - 2u);
+                                   /*overflow=*/false,
+                                   /*depth=*/BitString::kCapacity - 2u);
 
   EXPECT_EQ(MAKE_ENCODED_PATH(0b1010101u, 0b0000, 0b0), sci4.GetEncodedPathToRoot());
   EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b0000, 0b0),
@@ -320,7 +320,7 @@
   SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot();
   EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());
 
-  SubtypeCheckInfo childC = root.CreateChild(/*assign*/true);
+  SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true);
   EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState());
   EXPECT_EQ(MakeBitStringChar(2u), root.GetNext());  // Next incremented for Assign.
   EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC));
@@ -331,7 +331,7 @@
 
   // CopyCleared is just a thin wrapper around value-init and providing the depth.
   SubtypeCheckInfo cleared_copy_value =
-      SubtypeCheckInfo::Create(SubtypeCheckBits{}, /*depth*/1u);
+      SubtypeCheckInfo::Create(SubtypeCheckBits{}, /*depth=*/1u);
   EXPECT_EQ(SubtypeCheckInfo::kUninitialized, cleared_copy_value.GetState());
   EXPECT_EQ(MakeBitString({}), GetPathToRoot(cleared_copy_value));
 }
@@ -340,7 +340,7 @@
   SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot();
   EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());
 
-  SubtypeCheckInfo childC = root.CreateChild(/*assign*/true);
+  SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true);
   EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState());
   EXPECT_EQ(MakeBitStringChar(2u), root.GetNext());  // Next incremented for Assign.
   EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC));
@@ -350,17 +350,17 @@
   SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot();
   EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());
 
-  SubtypeCheckInfo childA = root.CreateChild(/*assign*/false);
+  SubtypeCheckInfo childA = root.CreateChild(/*assign_next=*/false);
   EXPECT_EQ(SubtypeCheckInfo::kInitialized, childA.GetState());
   EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());  // Next unchanged for Initialize.
   EXPECT_EQ(MakeBitString({}), GetPathToRoot(childA));
 
-  SubtypeCheckInfo childB = root.CreateChild(/*assign*/false);
+  SubtypeCheckInfo childB = root.CreateChild(/*assign_next=*/false);
   EXPECT_EQ(SubtypeCheckInfo::kInitialized, childB.GetState());
   EXPECT_EQ(MakeBitStringChar(1u), root.GetNext());  // Next unchanged for Initialize.
   EXPECT_EQ(MakeBitString({}), GetPathToRoot(childB));
 
-  SubtypeCheckInfo childC = root.CreateChild(/*assign*/true);
+  SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true);
   EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState());
   EXPECT_EQ(MakeBitStringChar(2u), root.GetNext());  // Next incremented for Assign.
   EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC));
@@ -369,19 +369,19 @@
     size_t cur_depth = 1u;
     SubtypeCheckInfo latest_child = childC;
     while (cur_depth != BitString::kCapacity) {
-      latest_child = latest_child.CreateChild(/*assign*/true);
+      latest_child = latest_child.CreateChild(/*assign_next=*/true);
       ASSERT_EQ(SubtypeCheckInfo::kAssigned, latest_child.GetState());
       ASSERT_EQ(cur_depth + 1u, GetPathToRoot(latest_child).Length());
       cur_depth++;
     }
 
     // Future assignments will result in a too-deep overflow.
-    SubtypeCheckInfo child_of_deep = latest_child.CreateChild(/*assign*/true);
+    SubtypeCheckInfo child_of_deep = latest_child.CreateChild(/*assign_next=*/true);
     EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of_deep.GetState());
     EXPECT_EQ(GetPathToRoot(latest_child), GetPathToRoot(child_of_deep));
 
     // Assignment of too-deep overflow also causes overflow.
-    SubtypeCheckInfo child_of_deep_2 = child_of_deep.CreateChild(/*assign*/true);
+    SubtypeCheckInfo child_of_deep_2 = child_of_deep.CreateChild(/*assign_next=*/true);
     EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of_deep_2.GetState());
     EXPECT_EQ(GetPathToRoot(child_of_deep), GetPathToRoot(child_of_deep_2));
   }
@@ -393,7 +393,7 @@
         break;
       }
 
-      SubtypeCheckInfo child = root.CreateChild(/*assign*/true);
+      SubtypeCheckInfo child = root.CreateChild(/*assign_next=*/true);
       ASSERT_EQ(SubtypeCheckInfo::kAssigned, child.GetState());
       ASSERT_EQ(MakeBitStringChar(cur_next+1u), root.GetNext());
       ASSERT_EQ(MakeBitString({cur_next}), GetPathToRoot(child));
@@ -403,20 +403,20 @@
     // Now the root will be in a state that further assigns will be too-wide overflow.
 
     // Initialization still succeeds.
-    SubtypeCheckInfo child = root.CreateChild(/*assign*/false);
+    SubtypeCheckInfo child = root.CreateChild(/*assign_next=*/false);
     EXPECT_EQ(SubtypeCheckInfo::kInitialized, child.GetState());
     EXPECT_EQ(MakeBitStringChar(cur_next), root.GetNext());
     EXPECT_EQ(MakeBitString({}), GetPathToRoot(child));
 
     // Assignment goes to too-wide Overflow.
-    SubtypeCheckInfo child_of = root.CreateChild(/*assign*/true);
+    SubtypeCheckInfo child_of = root.CreateChild(/*assign_next=*/true);
     EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of.GetState());
     EXPECT_EQ(MakeBitStringChar(cur_next), root.GetNext());
     EXPECT_EQ(MakeBitString({}), GetPathToRoot(child_of));
 
     // Assignment of overflowed child still succeeds.
     // The path to root is the same.
-    SubtypeCheckInfo child_of2 = child_of.CreateChild(/*assign*/true);
+    SubtypeCheckInfo child_of2 = child_of.CreateChild(/*assign_next=*/true);
     EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of2.GetState());
     EXPECT_EQ(GetPathToRoot(child_of), GetPathToRoot(child_of2));
   }
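These CreateChild tests pin down the assignment protocol: initializing a child leaves the parent's Next untouched, assigning consumes it (Next increments), and exceeding the bitstring's capacity in depth or width degrades to kOverflowed rather than failing. A toy model of that protocol (the constants and struct are assumptions, not ART's SubtypeCheckInfo):

#include <cstddef>

// Toy model of CreateChild(assign_next); capacities are placeholders.
constexpr size_t kCapacity = 3;   // stand-in for BitString::kCapacity
constexpr size_t kMaxWidth = 15;  // stand-in for a level's max label

struct NodeSketch {
  size_t depth = 0;
  size_t next = 1;  // next child label to hand out
  bool overflowed = false;
};

NodeSketch CreateChild(NodeSketch* parent, bool assign_next) {
  NodeSketch child;
  child.depth = parent->depth + 1;
  child.overflowed = parent->overflowed;
  if (assign_next) {
    if (child.depth > kCapacity || parent->next > kMaxWidth) {
      child.overflowed = true;  // too deep or too wide: overflow, not failure
    } else {
      parent->next++;  // Next increments only on assignment
    }
  }
  return child;
}
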
diff --git a/runtime/subtype_check_test.cc b/runtime/subtype_check_test.cc
index 9aa3032..719e5d9 100644
--- a/runtime/subtype_check_test.cc
+++ b/runtime/subtype_check_test.cc
@@ -302,7 +302,7 @@
 struct SubtypeCheckTest : public ::testing::Test {
  protected:
   void SetUp() override {
-    android::base::InitLogging(/*argv*/nullptr);
+    android::base::InitLogging(/*argv=*/nullptr);
 
     CreateRootedTree(BitString::kCapacity + 2u, BitString::kCapacity + 2u);
   }
@@ -312,8 +312,8 @@
 
   void CreateRootedTree(size_t width, size_t height) {
     all_classes_.clear();
-    root_ = CreateClassFor(/*parent*/nullptr, /*x*/0, /*y*/0);
-    CreateTreeFor(root_, /*width*/width, /*depth*/height);
+    root_ = CreateClassFor(/*parent=*/nullptr, /*x=*/0, /*y=*/0);
+    CreateTreeFor(root_, /*width=*/width, /*levels=*/height);
   }
 
   MockClass* CreateClassFor(MockClass* parent, size_t x, size_t y) {
@@ -681,7 +681,7 @@
     const std::vector<std::pair<SubtypeCheckInfo::State, SubtypeCheckInfo::State>>& transitions) {
   ASSERT_EQ(depth, transitions.size());
 
-  EnsureStateChangedTestRecursive(root, /*cur_depth*/0u, depth, transitions);
+  EnsureStateChangedTestRecursive(root, /*cur_depth=*/0u, depth, transitions);
 }
 
 TEST_F(SubtypeCheckTest, EnsureInitialized_NoOverflow) {
@@ -869,8 +869,8 @@
 
   {
     // Create too-wide siblings at the kTargetDepth level.
-    MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1u);
-    CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1);
+    MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1u);
+    CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1);
     ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren());
     ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild());
     // Leave the rest of the tree as the default.
@@ -914,15 +914,15 @@
 
   {
     // Create too-wide siblings at the kTargetDepth level.
-    MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1);
-    CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1);
+    MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1);
+    CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1);
     ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren()) << *child;
     ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild());
     // Leave the rest of the tree as the default.
 
     // Create too-wide children for a too-wide parent.
-    MockClass* child_subchild = child->FindChildAt(/*x*/0, kTargetDepth);
-    CreateTreeFor(child_subchild, kMaxWidthCutOffSub*2, /*depth*/1);
+    MockClass* child_subchild = child->FindChildAt(/*x=*/0, kTargetDepth);
+    CreateTreeFor(child_subchild, kMaxWidthCutOffSub*2, /*levels=*/1);
     ASSERT_LE(kMaxWidthCutOffSub*2, child_subchild->GetNumberOfChildren()) << *child_subchild;
     ASSERT_TRUE(IsTooWide(child_subchild->GetMaxChild())) << *(child_subchild->GetMaxChild());
   }
@@ -1035,8 +1035,8 @@
 
   {
     // Create too-wide siblings at the kTargetDepth level.
-    MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1u);
-    CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1);
+    MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1u);
+    CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1);
     ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren());
     ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild());
     // Leave the rest of the tree as the default.
@@ -1045,7 +1045,7 @@
     MockClass* child_subchild = child->GetMaxChild();
     ASSERT_TRUE(child_subchild != nullptr);
     ASSERT_EQ(0u, child_subchild->GetNumberOfChildren()) << *child_subchild;
-    CreateTreeFor(child_subchild, /*width*/1, /*levels*/kTooDeepTargetDepth);
+    CreateTreeFor(child_subchild, /*width=*/1, /*levels=*/kTooDeepTargetDepth);
     MockClass* too_deep_child = child_subchild->FindChildAt(0, kTooDeepTargetDepth + 2);
     ASSERT_TRUE(too_deep_child != nullptr) << child_subchild->ToDotGraph();
     ASSERT_TRUE(IsTooWide(too_deep_child)) << *(too_deep_child);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b3492e1..a3de4e2 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -72,6 +72,7 @@
 #include "handle_scope-inl.h"
 #include "indirect_reference_table-inl.h"
 #include "interpreter/interpreter.h"
+#include "interpreter/mterp/mterp.h"
 #include "interpreter/shadow_frame-inl.h"
 #include "java_frame_root_info.h"
 #include "jni/java_vm_ext.h"
@@ -93,6 +94,7 @@
 #include "quick_exception_handler.h"
 #include "read_barrier-inl.h"
 #include "reflection.h"
+#include "runtime-inl.h"
 #include "runtime.h"
 #include "runtime_callbacks.h"
 #include "scoped_thread_state_change-inl.h"
@@ -150,7 +152,7 @@
 void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
   CHECK(kUseReadBarrier);
   tls32_.is_gc_marking = is_marking;
-  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active */ is_marking);
+  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ is_marking);
   ResetQuickAllocEntryPointsForThread(is_marking);
 }
 
@@ -577,7 +579,7 @@
   VLOG(threads) << "installing stack protected region at " << std::hex <<
         static_cast<void*>(pregion) << " to " <<
         static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
-  if (ProtectStack(/* fatal_on_error */ false)) {
+  if (ProtectStack(/* fatal_on_error= */ false)) {
     // Tell the kernel that we won't be needing these pages any more.
     // NB. madvise will probably write zeroes into the memory (on linux it does).
     uint32_t unwanted_size = stack_top - pregion - kPageSize;
@@ -646,7 +648,7 @@
       static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
 
   // Protect the bottom of the stack to prevent read/write to it.
-  ProtectStack(/* fatal_on_error */ true);
+  ProtectStack(/* fatal_on_error= */ true);
 
   // Tell the kernel that we won't be needing these pages any more.
   // NB. madvise will probably write zeroes into the memory (on linux it does).
@@ -2012,13 +2014,13 @@
       DumpKernelStack(os, GetTid(), "  kernel: ", false);
       ArtMethod* method =
           GetCurrentMethod(nullptr,
-                           /*check_suspended*/ !force_dump_stack,
-                           /*abort_on_error*/ !(dump_for_abort || force_dump_stack));
+                           /*check_suspended=*/ !force_dump_stack,
+                           /*abort_on_error=*/ !(dump_for_abort || force_dump_stack));
       DumpNativeStack(os, GetTid(), backtrace_map, "  native: ", method);
     }
     DumpJavaStack(os,
-                  /*check_suspended*/ !force_dump_stack,
-                  /*dump_locks*/ !force_dump_stack);
+                  /*check_suspended=*/ !force_dump_stack,
+                  /*dump_locks=*/ !force_dump_stack);
   } else {
     os << "Not able to dump stack of thread that isn't suspended";
   }
@@ -2141,6 +2143,11 @@
   tlsPtr_.flip_function = nullptr;
   tlsPtr_.thread_local_mark_stack = nullptr;
   tls32_.is_transitioning_to_runnable = false;
+  tls32_.use_mterp = false;
+}
+
+void Thread::NotifyInThreadList() {
+  tls32_.use_mterp = interpreter::CanUseMterp();
 }
 
 bool Thread::CanLoadClasses() const {
@@ -2904,8 +2911,8 @@
   // Make sure the AnnotatedStackTraceElement.class is initialized, b/76208924 .
   class_linker->EnsureInitialized(soa.Self(),
                                   h_aste_class,
-                                  /* can_init_fields */ true,
-                                  /* can_init_parents */ true);
+                                  /* can_init_fields= */ true,
+                                  /* can_init_parents= */ true);
   if (soa.Self()->IsExceptionPending()) {
     // This should not fail in a healthy runtime.
     return nullptr;
@@ -3422,9 +3429,9 @@
       }
       PushDeoptimizationContext(
           JValue(),
-          false /* is_reference */,
+          /* is_reference= */ false,
           (force_deopt ? nullptr : exception),
-          false /* from_code */,
+          /* from_code= */ false,
           method_type);
       artDeoptimize(this);
       UNREACHABLE();
@@ -3550,7 +3557,7 @@
       }
     }
     // Mark lock count map required for structured locking checks.
-    shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg */ -1, this);
+    shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg= */ -1, this);
   }
 
  private:
@@ -3566,7 +3573,7 @@
       if (kVerifyImageObjectsMarked) {
         gc::Heap* const heap = Runtime::Current()->GetHeap();
         gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass,
-                                                                                /*fail_ok*/true);
+                                                                                /*fail_ok=*/true);
         if (space != nullptr && space->IsImageSpace()) {
           bool failed = false;
           if (!space->GetLiveBitmap()->Test(klass.Ptr())) {
@@ -3588,7 +3595,7 @@
         }
       }
       mirror::Object* new_ref = klass.Ptr();
-      visitor_(&new_ref, /* vreg */ -1, this);
+      visitor_(&new_ref, /* vreg= */ -1, this);
       if (new_ref != klass) {
         method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass());
       }
@@ -3661,7 +3668,7 @@
         mirror::Object* ref = ref_addr->AsMirrorPtr();
         if (ref != nullptr) {
           mirror::Object* new_ref = ref;
-          visitor_(&new_ref, /* vreg */ -1, this);
+          visitor_(&new_ref, /* vreg= */ -1, this);
           if (ref != new_ref) {
             ref_addr->Assign(new_ref);
           }
@@ -3854,9 +3861,9 @@
 
 void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
   if ((flags & VisitRootFlags::kVisitRootFlagPrecise) != 0) {
-    VisitRoots</* kPrecise */ true>(visitor);
+    VisitRoots</* kPrecise= */ true>(visitor);
   } else {
-    VisitRoots</* kPrecise */ false>(visitor);
+    VisitRoots</* kPrecise= */ false>(visitor);
   }
 }
 
@@ -4071,7 +4078,7 @@
 
 void Thread::SetReadBarrierEntrypoints() {
   // Make sure entrypoints aren't null.
-  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active*/ true);
+  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ true);
 }
 
 void Thread::ClearAllInterpreterCaches() {
diff --git a/runtime/thread.h b/runtime/thread.h
index d7dc5ae..941867c 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -19,6 +19,7 @@
 
 #include <setjmp.h>
 
+#include <atomic>
 #include <bitset>
 #include <deque>
 #include <iosfwd>
@@ -672,6 +673,13 @@
   }
 
   template<PointerSize pointer_size>
+  static constexpr ThreadOffset<pointer_size> UseMterpOffset() {
+    return ThreadOffset<pointer_size>(
+        OFFSETOF_MEMBER(Thread, tls32_) +
+        OFFSETOF_MEMBER(tls_32bit_sized_values, use_mterp));
+  }
+
+  template<PointerSize pointer_size>
   static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
     return ThreadOffset<pointer_size>(
         OFFSETOF_MEMBER(Thread, tls32_) +
@@ -1113,6 +1121,10 @@
     tls32_.state_and_flags.as_atomic_int.fetch_and(-1 ^ flag, std::memory_order_seq_cst);
   }
 
+  bool UseMterp() const {
+    return tls32_.use_mterp.load();
+  }
+
   void ResetQuickAllocEntryPointsForThread(bool is_marking);
 
   // Returns the remaining space in the TLAB.
@@ -1283,6 +1295,9 @@
   ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
   void Destroy();
 
+  void NotifyInThreadList()
+      REQUIRES_SHARED(Locks::thread_list_lock_);
+
   // Attaches the calling native thread to the runtime, returning the new native peer.
   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
   template <typename PeerAction>
@@ -1547,6 +1562,10 @@
     // This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
     // told that AssertHeld should be good enough.
     int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
+
+    // True if everything is in the ideal state for fast interpretation.
+    // False if we need to switch to the C++ interpreter to handle special cases.
+    std::atomic<bool32_t> use_mterp;
   } tls32_;
 
   struct PACKED(8) tls_64bit_sized_values {
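The new use_mterp field is a per-thread atomic boolean with an exported offset (UseMterpOffset) so the assembly interpreter can test it directly; the C++ side recomputes it once the thread joins the thread list. A stand-in sketch of that pattern (ThreadSketch and its bool argument are assumptions, not ART's Thread or CanUseMterp()):

#include <atomic>

// Stand-in for ART's Thread; shows the use_mterp polling pattern only.
class ThreadSketch {
 public:
  bool UseMterp() const { return use_mterp_.load(); }
  // Mirrors NotifyInThreadList() above: recompute the flag after the
  // thread is registered; the real code does this under the thread-list lock.
  void NotifyInThreadList(bool can_use_mterp) { use_mterp_.store(can_use_mterp); }

 private:
  std::atomic<bool> use_mterp_{false};
};

void Interpret(ThreadSketch* self) {
  if (self->UseMterp()) {
    // Fast path: hand execution to the assembly (mterp) interpreter.
  } else {
    // Slow path: the C++ interpreter handles the special cases.
  }
}
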
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ec40716..d21b600 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -438,7 +438,7 @@
   // Wake up the threads blocking for weak ref access so that they will respond to the empty
   // checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state.
   Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
-  Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint*/true);
+  Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint=*/true);
   {
     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
     uint64_t total_wait_time = 0;
@@ -491,9 +491,9 @@
               // Found a runnable thread that hasn't responded to the empty checkpoint request.
               // Assume it's stuck and safe to dump its stack.
               thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
-                           /*dump_native_stack*/ true,
-                           /*backtrace_map*/ nullptr,
-                           /*force_dump_stack*/ true);
+                           /*dump_native_stack=*/ true,
+                           /*backtrace_map=*/ nullptr,
+                           /*force_dump_stack=*/ true);
             }
           }
         }
@@ -1431,6 +1431,7 @@
     }
     self->SetWeakRefAccessEnabled(cc->IsWeakRefAccessEnabled());
   }
+  self->NotifyInThreadList();
 }
 
 void ThreadList::Unregister(Thread* self) {
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index f1c808b..a245f65 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -47,10 +47,10 @@
   stack_size += kPageSize;
   std::string error_msg;
   stack_ = MemMap::MapAnonymous(name.c_str(),
-                                /* addr */ nullptr,
+                                /* addr= */ nullptr,
                                 stack_size,
                                 PROT_READ | PROT_WRITE,
-                                /* low_4gb */ false,
+                                /* low_4gb= */ false,
                                 &error_msg);
   CHECK(stack_.IsValid()) << error_msg;
   CHECK_ALIGNED(stack_.Begin(), kPageSize);
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index 2600f55..9e7c44a 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -119,7 +119,7 @@
   // Drain the task list. Note: we have to restart here, as no tasks will be finished when
   // the pool is stopped.
   thread_pool.StartWorkers(self);
-  thread_pool.Wait(self, /* do_work */ true, false);
+  thread_pool.Wait(self, /* do_work= */ true, false);
 }
 
 class TreeTask : public Task {
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 4ee983d..ad58c2e 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -435,7 +435,7 @@
         // want to use the trampolines anyway since it is faster. It makes the story with disabling
         // jit-gc more complex though.
         runtime->GetInstrumentation()->EnableMethodTracing(
-            kTracerInstrumentationKey, /*needs_interpreter*/!runtime->IsJavaDebuggable());
+            kTracerInstrumentationKey, /*needs_interpreter=*/!runtime->IsJavaDebuggable());
       }
     }
   }
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 452cd8e..bd59e73 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -150,11 +150,11 @@
       (writable || unquicken) ? PROT_READ | PROT_WRITE : PROT_READ,
       unquicken ? MAP_PRIVATE : MAP_SHARED,
       file_fd,
-      /* start */ 0u,
+      /* start= */ 0u,
       low_4gb,
       vdex_filename.c_str(),
       mmap_reuse,
-      /* reservation */ nullptr,
+      /* reservation= */ nullptr,
       error_msg);
   if (!mmap.IsValid()) {
     *error_msg = "Failed to mmap file " + vdex_filename + " : " + *error_msg;
@@ -173,7 +173,7 @@
       return nullptr;
     }
     vdex->Unquicken(MakeNonOwningPointerVector(unique_ptr_dex_files),
-                    /* decompile_return_instruction */ false);
+                    /* decompile_return_instruction= */ false);
     // Update the quickening info size to pretend there isn't any.
     size_t offset = vdex->GetDexSectionHeaderOffset();
     reinterpret_cast<DexSectionHeader*>(vdex->mmap_.Begin() + offset)->quickening_info_size_ = 0;
@@ -213,13 +213,13 @@
     std::unique_ptr<const DexFile> dex(dex_file_loader.OpenWithDataSection(
         dex_file_start,
         size,
-        /*data_base*/ nullptr,
-        /*data_size*/ 0u,
+        /*data_base=*/ nullptr,
+        /*data_size=*/ 0u,
         location,
         GetLocationChecksum(i),
-        nullptr /*oat_dex_file*/,
-        false /*verify*/,
-        false /*verify_checksum*/,
+        /*oat_dex_file=*/ nullptr,
+        /*verify=*/ false,
+        /*verify_checksum=*/ false,
         error_msg));
     if (dex == nullptr) {
       return false;
diff --git a/runtime/vdex_file_test.cc b/runtime/vdex_file_test.cc
index ced6e28..9d92b42 100644
--- a/runtime/vdex_file_test.cc
+++ b/runtime/vdex_file_test.cc
@@ -34,14 +34,14 @@
   std::unique_ptr<VdexFile> vdex = VdexFile::Open(tmp.GetFd(),
                                                   0,
                                                   tmp.GetFilename(),
-                                                  /*writable*/false,
-                                                  /*low_4gb*/false,
-                                                  /*quicken*/false,
+                                                  /*writable=*/false,
+                                                  /*low_4gb=*/false,
+                                                  /*unquicken=*/false,
                                                   &error_msg);
   EXPECT_TRUE(vdex == nullptr);
 
   vdex = VdexFile::Open(
-      tmp.GetFilename(), /*writable*/false, /*low_4gb*/false, /*quicken*/ false, &error_msg);
+      tmp.GetFilename(), /*writable=*/false, /*low_4gb=*/false, /*unquicken=*/false, &error_msg);
   EXPECT_TRUE(vdex == nullptr);
 }
 
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 5fce892..7b07389 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -242,7 +242,7 @@
     *previous_idx = method_idx;
     const InvokeType type = method.GetInvokeType(class_def.access_flags_);
     ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
-        method_idx, dex_cache, class_loader, /* referrer */ nullptr, type);
+        method_idx, dex_cache, class_loader, /* referrer= */ nullptr, type);
     if (resolved_method == nullptr) {
       DCHECK(self->IsExceptionPending());
       // We couldn't resolve the method, but continue regardless.
@@ -263,7 +263,7 @@
                                                       callbacks,
                                                       allow_soft_failures,
                                                       log_level,
-                                                      /*need_precise_constants*/ false,
+                                                      /*need_precise_constants=*/ false,
                                                       api_level,
                                                       &hard_failure_msg);
     if (result.kind == FailureKind::kHardFailure) {
@@ -340,11 +340,11 @@
                           method_idx,
                           method,
                           method_access_flags,
-                          true /* can_load_classes */,
+                          /* can_load_classes= */ true,
                           allow_soft_failures,
                           need_precise_constants,
-                          false /* verify to dump */,
-                          true /* allow_thread_suspension */,
+                          /* verify_to_dump= */ false,
+                          /* allow_thread_suspension= */ true,
                           api_level);
   if (verifier.Verify()) {
     // Verification completed, however failures may be pending that didn't cause the verification
@@ -475,11 +475,11 @@
                                                 dex_method_idx,
                                                 method,
                                                 method_access_flags,
-                                                true /* can_load_classes */,
-                                                true /* allow_soft_failures */,
-                                                true /* need_precise_constants */,
-                                                true /* verify_to_dump */,
-                                                true /* allow_thread_suspension */,
+                                                /* can_load_classes= */ true,
+                                                /* allow_soft_failures= */ true,
+                                                /* need_precise_constants= */ true,
+                                                /* verify_to_dump= */ true,
+                                                /* allow_thread_suspension= */ true,
                                                 api_level);
   verifier->Verify();
   verifier->DumpFailures(vios->Stream());
@@ -570,11 +570,11 @@
                           m->GetDexMethodIndex(),
                           m,
                           m->GetAccessFlags(),
-                          false /* can_load_classes */,
-                          true  /* allow_soft_failures */,
-                          false /* need_precise_constants */,
-                          false /* verify_to_dump */,
-                          false /* allow_thread_suspension */,
+                          /* can_load_classes= */ false,
+                          /* allow_soft_failures= */ true,
+                          /* need_precise_constants= */ false,
+                          /* verify_to_dump= */ false,
+                          /* allow_thread_suspension= */ false,
                           api_level);
   verifier.interesting_dex_pc_ = dex_pc;
   verifier.monitor_enter_dex_pcs_ = monitor_enter_dex_pcs;
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index cedc583..7519257 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -42,7 +42,7 @@
     // Verify the class
     std::string error_msg;
     FailureKind failure = MethodVerifier::VerifyClass(
-        self, klass, nullptr, true, HardFailLogMode::kLogWarning, /* api_level */ 0u, &error_msg);
+        self, klass, nullptr, true, HardFailLogMode::kLogWarning, /* api_level= */ 0u, &error_msg);
 
     if (android::base::StartsWith(descriptor, "Ljava/lang/invoke")) {
       ASSERT_TRUE(failure == FailureKind::kSoftFailure ||
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 4a3f9e6..91be00d 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -756,13 +756,13 @@
         VerifierDeps::MaybeRecordAssignability(verifier->GetDexFile(),
                                                join_class,
                                                GetClass(),
-                                               /* strict */ true,
-                                               /* is_assignable */ true);
+                                               /* is_strict= */ true,
+                                               /* is_assignable= */ true);
         VerifierDeps::MaybeRecordAssignability(verifier->GetDexFile(),
                                                join_class,
                                                incoming_type.GetClass(),
-                                               /* strict */ true,
-                                               /* is_assignable */ true);
+                                               /* is_strict= */ true,
+                                               /* is_assignable= */ true);
       }
       if (GetClass() == join_class && !IsPreciseReference()) {
         return *this;
@@ -771,7 +771,7 @@
       } else {
         std::string temp;
         const char* descriptor = join_class->GetDescriptor(&temp);
-        return reg_types->FromClass(descriptor, join_class, /* precise */ false);
+        return reg_types->FromClass(descriptor, join_class, /* precise= */ false);
       }
     }
   } else {
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index 9f87adf..f62e8b6 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -126,7 +126,7 @@
 inline const PreciseReferenceType& RegTypeCache::JavaLangClass() {
   const RegType* result = &FromClass("Ljava/lang/Class;",
                                      GetClassRoot<mirror::Class>(),
-                                     /* precise */ true);
+                                     /* precise= */ true);
   DCHECK(result->IsPreciseReference());
   return *down_cast<const PreciseReferenceType*>(result);
 }
@@ -135,7 +135,7 @@
   // String is final and therefore always precise.
   const RegType* result = &FromClass("Ljava/lang/String;",
                                      GetClassRoot<mirror::String>(),
-                                     /* precise */ true);
+                                     /* precise= */ true);
   DCHECK(result->IsPreciseReference());
   return *down_cast<const PreciseReferenceType*>(result);
 }
@@ -143,7 +143,7 @@
 inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodHandle() {
   const RegType* result = &FromClass("Ljava/lang/invoke/MethodHandle;",
                                      GetClassRoot<mirror::MethodHandle>(),
-                                     /* precise */ true);
+                                     /* precise= */ true);
   DCHECK(result->IsPreciseReference());
   return *down_cast<const PreciseReferenceType*>(result);
 }
@@ -151,7 +151,7 @@
 inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodType() {
   const RegType* result = &FromClass("Ljava/lang/invoke/MethodType;",
                                      GetClassRoot<mirror::MethodType>(),
-                                     /* precise */ true);
+                                     /* precise= */ true);
   DCHECK(result->IsPreciseReference());
   return *down_cast<const PreciseReferenceType*>(result);
 }
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index f1f3488..ceba748 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -438,14 +438,14 @@
 
     // Is the resolved part a primitive array?
     if (resolved_merged_is_array && !resolved_parts_merged.IsObjectArrayTypes()) {
-      return JavaLangObject(false /* precise */);
+      return JavaLangObject(/* precise= */ false);
     }
 
     // Is any part not an array (but exists)?
     if ((!left_unresolved_is_array && left_resolved != &left) ||
         (!right_unresolved_is_array && right_resolved != &right) ||
         !resolved_merged_is_array) {
-      return JavaLangObject(false /* precise */);
+      return JavaLangObject(/* precise= */ false);
     }
   }
 
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 0430d20..3224385 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -80,8 +80,8 @@
   EXPECT_FALSE(precise_lo.CheckWidePair(precise_const));
   EXPECT_TRUE(precise_lo.CheckWidePair(precise_hi));
   // Test Merging.
-  EXPECT_TRUE((long_lo.Merge(precise_lo, &cache, /* verifier */ nullptr)).IsLongTypes());
-  EXPECT_TRUE((long_hi.Merge(precise_hi, &cache, /* verifier */ nullptr)).IsLongHighTypes());
+  EXPECT_TRUE((long_lo.Merge(precise_lo, &cache, /* verifier= */ nullptr)).IsLongTypes());
+  EXPECT_TRUE((long_hi.Merge(precise_hi, &cache, /* verifier= */ nullptr)).IsLongHighTypes());
 }
 
 TEST_F(RegTypeTest, Primitives) {
@@ -429,7 +429,7 @@
   const RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
   const RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
   const RegType& unresolved_merged = cache.FromUnresolvedMerge(
-      unresolved_ref, unresolved_ref_another, /* verifier */ nullptr);
+      unresolved_ref, unresolved_ref_another, /* verifier= */ nullptr);
 
   std::string expected = "Unresolved Reference: java.lang.DoesNotExist";
   EXPECT_EQ(expected, unresolved_ref.Dump());
@@ -490,14 +490,14 @@
   RegTypeCache cache_new(true, allocator);
   const RegType& string = cache_new.JavaLangString();
   const RegType& Object = cache_new.JavaLangObject(true);
-  EXPECT_TRUE(string.Merge(Object, &cache_new, /* verifier */ nullptr).IsJavaLangObject());
+  EXPECT_TRUE(string.Merge(Object, &cache_new, /* verifier= */ nullptr).IsJavaLangObject());
   // Merge two unresolved types.
   const RegType& ref_type_0 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
   const RegType& ref_type_1 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistToo;", true);
   EXPECT_FALSE(ref_type_0.Equals(ref_type_1));
 
-  const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new, /* verifier */ nullptr);
+  const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new, /* verifier= */ nullptr);
   EXPECT_TRUE(merged.IsUnresolvedMergedReference());
   RegType& merged_nonconst = const_cast<RegType&>(merged);
 
@@ -520,22 +520,22 @@
   const RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
   {
     // float MERGE precise cst => float.
-    const RegType& merged = float_type.Merge(precise_cst, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = float_type.Merge(precise_cst, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsFloat());
   }
   {
     // precise cst MERGE float => float.
-    const RegType& merged = precise_cst.Merge(float_type, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = precise_cst.Merge(float_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsFloat());
   }
   {
     // float MERGE imprecise cst => float.
-    const RegType& merged = float_type.Merge(imprecise_cst, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = float_type.Merge(imprecise_cst, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsFloat());
   }
   {
     // imprecise cst MERGE float => float.
-    const RegType& merged = imprecise_cst.Merge(float_type, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = imprecise_cst.Merge(float_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsFloat());
   }
 }
@@ -556,46 +556,46 @@
   const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
   {
     // lo MERGE precise cst lo => lo.
-    const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // precise cst lo MERGE lo => lo.
-    const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // lo MERGE imprecise cst lo => lo.
     const RegType& merged = long_lo_type.Merge(
-        imprecise_cst_lo, &cache_new, /* verifier */ nullptr);
+        imprecise_cst_lo, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // imprecise cst lo MERGE lo => lo.
     const RegType& merged = imprecise_cst_lo.Merge(
-        long_lo_type, &cache_new, /* verifier */ nullptr);
+        long_lo_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongLo());
   }
   {
     // hi MERGE precise cst hi => hi.
-    const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongHi());
   }
   {
     // precise cst hi MERGE hi => hi.
-    const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new, /* verifier */ nullptr);
+    const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongHi());
   }
   {
     // hi MERGE imprecise cst hi => hi.
     const RegType& merged = long_hi_type.Merge(
-        imprecise_cst_hi, &cache_new, /* verifier */ nullptr);
+        imprecise_cst_hi, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongHi());
   }
   {
     // imprecise cst hi MERGE hi => hi.
     const RegType& merged = imprecise_cst_hi.Merge(
-        long_hi_type, &cache_new, /* verifier */ nullptr);
+        long_hi_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsLongHi());
   }
 }
@@ -617,49 +617,49 @@
   {
     // lo MERGE precise cst lo => lo.
     const RegType& merged = double_lo_type.Merge(
-        precise_cst_lo, &cache_new, /* verifier */ nullptr);
+        precise_cst_lo, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // precise cst lo MERGE lo => lo.
     const RegType& merged = precise_cst_lo.Merge(
-        double_lo_type, &cache_new, /* verifier */ nullptr);
+        double_lo_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // lo MERGE imprecise cst lo => lo.
     const RegType& merged = double_lo_type.Merge(
-        imprecise_cst_lo, &cache_new, /* verifier */ nullptr);
+        imprecise_cst_lo, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // imprecise cst lo MERGE lo => lo.
     const RegType& merged = imprecise_cst_lo.Merge(
-        double_lo_type, &cache_new, /* verifier */ nullptr);
+        double_lo_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleLo());
   }
   {
     // hi MERGE precise cst hi => hi.
     const RegType& merged = double_hi_type.Merge(
-        precise_cst_hi, &cache_new, /* verifier */ nullptr);
+        precise_cst_hi, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
   {
     // precise cst hi MERGE hi => hi.
     const RegType& merged = precise_cst_hi.Merge(
-        double_hi_type, &cache_new, /* verifier */ nullptr);
+        double_hi_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
   {
     // hi MERGE imprecise cst hi => hi.
     const RegType& merged = double_hi_type.Merge(
-        imprecise_cst_hi, &cache_new, /* verifier */ nullptr);
+        imprecise_cst_hi, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
   {
     // imprecise cst hi MERGE hi => hi.
     const RegType& merged = imprecise_cst_hi.Merge(
-        double_hi_type, &cache_new, /* verifier */ nullptr);
+        double_hi_type, &cache_new, /* verifier= */ nullptr);
     EXPECT_TRUE(merged.IsDoubleHi());
   }
 }
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index fb91976..b666c15 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -43,7 +43,7 @@
 }
 
 VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files)
-    : VerifierDeps(dex_files, /*output_only*/ true) {}
+    : VerifierDeps(dex_files, /*output_only=*/ true) {}
 
 void VerifierDeps::MergeWith(const VerifierDeps& other,
                              const std::vector<const DexFile*>& dex_files) {
@@ -439,7 +439,7 @@
       AddAssignability(dex_file,
                        destination_component,
                        source_component,
-                       /* is_strict */ true,
+                       /* is_strict= */ true,
                        is_assignable);
       return;
     }
@@ -707,7 +707,7 @@
 
 VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files,
                            ArrayRef<const uint8_t> data)
-    : VerifierDeps(dex_files, /*output_only*/ false) {
+    : VerifierDeps(dex_files, /*output_only=*/ false) {
   if (data.empty()) {
     // Return eagerly, as the first thing we expect from VerifierDeps data is
     // the number of created strings, even if there is no dependency.
@@ -1089,9 +1089,9 @@
                                  const DexFileDeps& deps,
                                  Thread* self) const {
   bool result = VerifyAssignability(
-      class_loader, dex_file, deps.assignable_types_, /* expected_assignability */ true, self);
+      class_loader, dex_file, deps.assignable_types_, /* expected_assignability= */ true, self);
   result = result && VerifyAssignability(
-      class_loader, dex_file, deps.unassignable_types_, /* expected_assignability */ false, self);
+      class_loader, dex_file, deps.unassignable_types_, /* expected_assignability= */ false, self);
 
   result = result && VerifyClasses(class_loader, dex_file, deps.classes_, self);
   result = result && VerifyFields(class_loader, dex_file, deps.fields_, self);
diff --git a/test/1957-error-ext/expected.txt b/test/1957-error-ext/expected.txt
new file mode 100644
index 0000000..bfe7033
--- /dev/null
+++ b/test/1957-error-ext/expected.txt
@@ -0,0 +1,4 @@
+LastError is: <call returned error: class java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION>
+Got class java.lang.Exception: Failed to redefine class <Lart/Test1957$Transform;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED
+LastError is: FAILURE TO REDEFINE Unable to perform redefinition of 'Lart/Test1957$Transform;': Total number of declared methods changed from 2 to 1
+LastError is: <call returned error: class java.lang.RuntimeException: JVMTI_ERROR_ABSENT_INFORMATION>
diff --git a/test/1957-error-ext/info.txt b/test/1957-error-ext/info.txt
new file mode 100644
index 0000000..ef772d9
--- /dev/null
+++ b/test/1957-error-ext/info.txt
@@ -0,0 +1 @@
+Test for get_last_error_message extension function.
diff --git a/test/1957-error-ext/lasterror.cc b/test/1957-error-ext/lasterror.cc
new file mode 100644
index 0000000..5aa3fbe
--- /dev/null
+++ b/test/1957-error-ext/lasterror.cc
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <cstdio>
+#include <cstring>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+
+// Test infrastructure
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+namespace art {
+namespace Test1957ErrorExt {
+
+using GetLastError = jvmtiError(*)(jvmtiEnv* env, char** msg);
+using ClearLastError = jvmtiError(*)(jvmtiEnv* env);
+
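+// Free memory that was allocated by JVMTI; the variadic overload frees each
+// argument in turn.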
+template <typename T>
+static void Dealloc(T* t) {
+  jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(t));
+}
+
+template <typename T, typename ...Rest>
+static void Dealloc(T* t, Rest... rs) {
+  Dealloc(t);
+  Dealloc(rs...);
+}
+
+static void DeallocParams(jvmtiParamInfo* params, jint n_params) {
+  for (jint i = 0; i < n_params; i++) {
+    Dealloc(params[i].name);
+  }
+}
+
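+// Search the JVMTI extension function list for the entry whose id matches
+// 'name', deallocating every returned info structure along the way. Raises a
+// Java RuntimeException and returns nullptr if no match is found.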
+static jvmtiExtensionFunction FindExtensionMethod(JNIEnv* env, const std::string& name) {
+  jint n_ext;
+  jvmtiExtensionFunctionInfo* infos;
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetExtensionFunctions(&n_ext, &infos))) {
+    return nullptr;
+  }
+  jvmtiExtensionFunction res = nullptr;
+  for (jint i = 0; i < n_ext; i++) {
+    jvmtiExtensionFunctionInfo* cur_info = &infos[i];
+    if (strcmp(name.c_str(), cur_info->id) == 0) {
+      res = cur_info->func;
+    }
+    // Clean up cur_info.
+    DeallocParams(cur_info->params, cur_info->param_count);
+    Dealloc(cur_info->id, cur_info->short_description, cur_info->params, cur_info->errors);
+  }
+  // Clean up the array.
+  Dealloc(infos);
+  if (res == nullptr) {
+    ScopedLocalRef<jclass> rt_exception(env, env->FindClass("java/lang/RuntimeException"));
+    env->ThrowNew(rt_exception.get(), (name + " extension not found").c_str());
+    return nullptr;
+  }
+  return res;
+}
+
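+// JNI entry point: fetch the runtime's last error message via the
+// get_last_error_message extension, or return null on failure.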
+extern "C" JNIEXPORT
+jstring JNICALL Java_art_Test1957_getLastError(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+  GetLastError get_last_error = reinterpret_cast<GetLastError>(
+      FindExtensionMethod(env, "com.android.art.misc.get_last_error_message"));
+  if (get_last_error == nullptr) {
+    return nullptr;
+  }
+  char* msg;
+  if (JvmtiErrorToException(env, jvmti_env, get_last_error(jvmti_env, &msg))) {
+    return nullptr;
+  }
+
+  return env->NewStringUTF(msg);
+}
+
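+// JNI entry point: clear the runtime's stored error message via the
+// clear_last_error_message extension.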
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1957_clearLastError(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+  ClearLastError clear_last_error = reinterpret_cast<ClearLastError>(
+      FindExtensionMethod(env, "com.android.art.misc.clear_last_error_message"));
+  if (clear_last_error == nullptr) {
+    return;
+  }
+  JvmtiErrorToException(env, jvmti_env, clear_last_error(jvmti_env));
+}
+
+}  // namespace Test1957ErrorExt
+}  // namespace art
diff --git a/test/1957-error-ext/run b/test/1957-error-ext/run
new file mode 100755
index 0000000..8be0ed4
--- /dev/null
+++ b/test/1957-error-ext/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
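+# Run the test with the jvmti agent enabled.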
+./default-run "$@" --jvmti
diff --git a/test/1957-error-ext/src/Main.java b/test/1957-error-ext/src/Main.java
new file mode 100644
index 0000000..7e5e075
--- /dev/null
+++ b/test/1957-error-ext/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1957.run();
+  }
+}
diff --git a/test/1957-error-ext/src/art/Redefinition.java b/test/1957-error-ext/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1957-error-ext/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+
+// Common Redefinition functions. Placed here for use by CTS.
+public class Redefinition {
+  public static final class CommonClassDefinition {
+    public final Class<?> target;
+    public final byte[] class_file_bytes;
+    public final byte[] dex_file_bytes;
+
+    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+      this.target = target;
+      this.class_file_bytes = class_file_bytes;
+      this.dex_file_bytes = dex_file_bytes;
+    }
+  }
+
+  // A set of possible test configurations. Tests should set this if they need to.
+  // This must be kept in sync with the defines in ti-agent/common_helper.cc
+  public static enum Config {
+    COMMON_REDEFINE(0),
+    COMMON_RETRANSFORM(1),
+    COMMON_TRANSFORM(2);
+
+    private final int val;
+    private Config(int val) {
+      this.val = val;
+    }
+  }
+
+  public static void setTestConfiguration(Config type) {
+    nativeSetTestConfiguration(type.val);
+  }
+
+  private static native void nativeSetTestConfiguration(int type);
+
+  // Transforms the class
+  public static native void doCommonClassRedefinition(Class<?> target,
+                                                      byte[] classfile,
+                                                      byte[] dexfile);
+
+  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+    ArrayList<Class<?>> classes = new ArrayList<>();
+    ArrayList<byte[]> class_files = new ArrayList<>();
+    ArrayList<byte[]> dex_files = new ArrayList<>();
+
+    for (CommonClassDefinition d : defs) {
+      classes.add(d.target);
+      class_files.add(d.class_file_bytes);
+      dex_files.add(d.dex_file_bytes);
+    }
+    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+                                   class_files.toArray(new byte[0][]),
+                                   dex_files.toArray(new byte[0][]));
+  }
+
+  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+    for (CommonClassDefinition d : defs) {
+      addCommonTransformationResult(d.target.getCanonicalName(),
+                                    d.class_file_bytes,
+                                    d.dex_file_bytes);
+    }
+  }
+
+  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+                                                           byte[][] classfiles,
+                                                           byte[][] dexfiles);
+  public static native void doCommonClassRetransformation(Class<?>... target);
+  public static native void setPopRetransformations(boolean pop);
+  public static native void popTransformationFor(String name);
+  public static native void enableCommonRetransformation(boolean enable);
+  public static native void addCommonTransformationResult(String target_name,
+                                                          byte[] class_bytes,
+                                                          byte[] dex_bytes);
+}
diff --git a/test/1957-error-ext/src/art/Test1957.java b/test/1957-error-ext/src/art/Test1957.java
new file mode 100644
index 0000000..ffb68be
--- /dev/null
+++ b/test/1957-error-ext/src/art/Test1957.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+
+public class Test1957 {
+
+  static class Transform {
+    public void sayHi() {
+      // Use a lowercase 'h' to make sure the string will have a different string id
+      // than the transformation (the transformation code is the same except for
+      // the actual printed String, which was making the test inaccurately pass
+      // in JIT mode when loading the string from the dex cache, as the string ids
+      // of the two different strings were the same).
+      // We know the string ids will be different because lexicographically:
+      // "Goodbye" < "LTransform;" < "hello".
+      System.out.println("hello");
+    }
+  }
+
+  /**
+   * Base64 encoded class/dex file for
+   * class Transform {
+   * }
+   */
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+    "yv66vgAAADUAEQoAAwAKBwAMBwAPAQAGPGluaXQ+AQADKClWAQAEQ29kZQEAD0xpbmVOdW1iZXJU" +
+    "YWJsZQEAClNvdXJjZUZpbGUBAA1UZXN0MTk1Ny5qYXZhDAAEAAUHABABABZhcnQvVGVzdDE5NTck" +
+    "VHJhbnNmb3JtAQAJVHJhbnNmb3JtAQAMSW5uZXJDbGFzc2VzAQAQamF2YS9sYW5nL09iamVjdAEA" +
+    "DGFydC9UZXN0MTk1NwAgAAIAAwAAAAAAAQAAAAQABQABAAYAAAAdAAEAAQAAAAUqtwABsQAAAAEA" +
+    "BwAAAAYAAQAAAAYAAgAIAAAAAgAJAA4AAAAKAAEAAgALAA0ACA==");
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQAQiK+oahCb4T18bDge0pSvp7rka4UQ2AY0AwAAcAAAAHhWNBIAAAAAAAAAAIgCAAAN" +
+    "AAAAcAAAAAYAAACkAAAAAQAAALwAAAAAAAAAAAAAAAIAAADIAAAAAQAAANgAAAA8AgAA+AAAABQB" +
+    "AAAcAQAANgEAAEYBAABqAQAAigEAAJ4BAACtAQAAuAEAALsBAADIAQAAzgEAANUBAAABAAAAAgAA" +
+    "AAMAAAAEAAAABQAAAAgAAAAIAAAABQAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAA" +
+    "AAAAAAYAAAB4AgAAWwIAAAAAAAABAAEAAQAAABABAAAEAAAAcBABAAAADgAGAA4ABjxpbml0PgAY" +
+    "TGFydC9UZXN0MTk1NyRUcmFuc2Zvcm07AA5MYXJ0L1Rlc3QxOTU3OwAiTGRhbHZpay9hbm5vdGF0" +
+    "aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ABJMamF2" +
+    "YS9sYW5nL09iamVjdDsADVRlc3QxOTU3LmphdmEACVRyYW5zZm9ybQABVgALYWNjZXNzRmxhZ3MA" +
+    "BG5hbWUABXZhbHVlAHV+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVidWciLCJtaW4tYXBpIjox" +
+    "LCJzaGEtMSI6Ijg0NjI2ZDE0MmRiMmY4NzVhY2E2YjVlOWVmYWU3OThjYWQ5ZDlhNTAiLCJ2ZXJz" +
+    "aW9uIjoiMS40LjItZGV2In0AAgIBCxgBAgMCCQQIChcHAAABAACAgAT4AQAAAAAAAAACAAAATAIA" +
+    "AFICAABsAgAAAAAAAAAAAAAAAAAADgAAAAAAAAABAAAAAAAAAAEAAAANAAAAcAAAAAIAAAAGAAAA" +
+    "pAAAAAMAAAABAAAAvAAAAAUAAAACAAAAyAAAAAYAAAABAAAA2AAAAAEgAAABAAAA+AAAAAMgAAAB" +
+    "AAAAEAEAAAIgAAANAAAAFAEAAAQgAAACAAAATAIAAAAgAAABAAAAWwIAAAMQAAACAAAAaAIAAAYg" +
+    "AAABAAAAeAIAAAAQAAABAAAAiAIAAA==");
+
+  public static void run() {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    Transform t = new Transform();
+    System.out.println("LastError is: " + getLastErrorOrException());
+    try {
+      Redefinition.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+    } catch (Throwable e) {
+      System.out.println("Got " + e.getClass().toString() + ": " + e.getMessage());
+    }
+    System.out.println("LastError is: " + getLastErrorOrException());
+    clearLastError();
+    System.out.println("LastError is: " + getLastErrorOrException());
+  }
+
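+  // Wraps getLastError so any thrown error is reported in the output instead
+  // of aborting the test.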
+  public static String getLastErrorOrException() {
+    try {
+      return getLastError();
+    } catch (Throwable t) {
+      return "<call returned error: " + t.getClass().toString() + ": " + t.getMessage() + ">";
+    }
+  }
+  public static native String getLastError();
+  public static native void clearLastError();
+}
diff --git a/test/Android.bp b/test/Android.bp
index 8c1c1bf..561f95e 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -292,6 +292,7 @@
         "1950-unprepared-transform/unprepared_transform.cc",
         "1951-monitor-enter-no-suspend/raw_monitor.cc",
         "1953-pop-frame/pop_frame.cc",
+        "1957-error-ext/lasterror.cc",
     ],
     // Use NDK-compatible headers for ctstiagent.
     header_libs: [
diff --git a/test/knownfailures.json b/test/knownfailures.json
index f4f45ce..d831993 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -1027,7 +1027,8 @@
                   "679-locks",
                   "999-redefine-hiddenapi",
                   "1000-non-moving-space-stress",
-                  "1951-monitor-enter-no-suspend"],
+                  "1951-monitor-enter-no-suspend",
+                  "1957-error-ext"],
         "variant": "jvm",
         "description": ["Doesn't run on RI."]
     },
diff --git a/tools/cpp-define-generator/thread.def b/tools/cpp-define-generator/thread.def
index 7b19076..8c91dc8 100644
--- a/tools/cpp-define-generator/thread.def
+++ b/tools/cpp-define-generator/thread.def
@@ -56,5 +56,7 @@
            art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest)
 ASM_DEFINE(THREAD_SUSPEND_REQUEST,
            art::kSuspendRequest)
+ASM_DEFINE(THREAD_USE_MTERP_OFFSET,
+           art::Thread::UseMterpOffset<art::kRuntimePointerSize>().Int32Value())
 ASM_DEFINE(THREAD_TOP_QUICK_FRAME_OFFSET,
            art::Thread::TopOfManagedStackOffset<art::kRuntimePointerSize>().Int32Value())
diff --git a/tools/ti-fast/tifast.cc b/tools/ti-fast/tifast.cc
index 00ef656..d02e549 100644
--- a/tools/ti-fast/tifast.cc
+++ b/tools/ti-fast/tifast.cc
@@ -574,7 +574,7 @@
 
 static std::string EventToName(jvmtiEvent desired_event) {
 #define CHECK_NAME(name, event, args, argnames) \
-  if (desired_event == event) { \
+  if (desired_event == (event)) { \
     return #name; \
   }
   FOR_ALL_SUPPORTED_EVENTS(CHECK_NAME);
diff --git a/tools/titrace/instruction_decoder.cc b/tools/titrace/instruction_decoder.cc
index 7f8b296..89904b3 100644
--- a/tools/titrace/instruction_decoder.cc
+++ b/tools/titrace/instruction_decoder.cc
@@ -484,7 +484,7 @@
    public:
     enum Opcode {
 #define MAKE_ENUM_DEFINITION(opcode, instruction_code, name, format, index, flags, extended_flags, verifier_flags) \
-      instruction_code = opcode,
+      instruction_code = opcode,  /* NOLINT */
 DEX_INSTRUCTION_LIST(MAKE_ENUM_DEFINITION)
 #undef MAKE_ENUM_DEFINITION
     };
diff --git a/tools/veridex/flow_analysis.cc b/tools/veridex/flow_analysis.cc
index e925e1d..1fca7e1 100644
--- a/tools/veridex/flow_analysis.cc
+++ b/tools/veridex/flow_analysis.cc
@@ -162,7 +162,7 @@
     case Instruction::IF_##cond##Z: { \
       RegisterValue val = GetRegister(instruction.VRegA()); \
       if (val.IsConstant()) { \
-        if (val.GetConstant() op 0) { \
+        if (val.GetConstant() op 0) {  /* NOLINT */ \
           return Instruction::kBranch; \
         } else { \
           return Instruction::kContinue; \