Merge "ART: show exact bytes along with human-friendly format"
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 1bcca7c..a926d9a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -525,7 +525,7 @@
 ifeq (,$(SANITIZE_HOST))
 $$(gtest_output): $$(gtest_exe) $$(gtest_deps)
 	$(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && \
-		timeout -k 120s -s SIGRTMIN+2 2400s $(HOST_OUT_EXECUTABLES)/timeout_dumper \
+		timeout --foreground -k 120s -s SIGRTMIN+2 2400s $(HOST_OUT_EXECUTABLES)/timeout_dumper \
 			$$< --gtest_output=xml:$$@ && \
 		$$(call ART_TEST_PASSED,$$(NAME))) || $$(call ART_TEST_FAILED,$$(NAME))
 else
@@ -538,7 +538,7 @@
 # under ASAN.
 $$(gtest_output): $$(gtest_exe) $$(gtest_deps)
 	$(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && set -o pipefail && \
-		ASAN_OPTIONS=detect_leaks=1 timeout -k 120s -s SIGRTMIN+2 3600s \
+		ASAN_OPTIONS=detect_leaks=1 timeout --foreground -k 120s -s SIGRTMIN+2 3600s \
 			$(HOST_OUT_EXECUTABLES)/timeout_dumper \
 				$$< --gtest_output=xml:$$@ 2>&1 | tee $$<.tmp.out >&2 && \
 		{ $$(call ART_TEST_PASSED,$$(NAME)) ; rm $$<.tmp.out ; }) || \
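Note: by default GNU timeout runs the command in its own process group so that it can kill the whole group on expiry. The added --foreground flag keeps the test binary in the terminal's foreground process group, so TTY signals such as Ctrl-C from an interactive build reach it directly; per the coreutils documentation, the trade-off is that children of the command are not timed out in this mode.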
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
index 0ec0a15..193a3c1 100644
--- a/build/apex/Android.bp
+++ b/build/apex/Android.bp
@@ -18,12 +18,16 @@
     "libopenjdkjvm",
     "libopenjdkjvmti",
     "libadbconnection",
+    "libjavacrypto",
 ]
 bionic_native_shared_libs = [
     "libc",
     "libm",
     "libdl",
 ]
+bionic_binaries_both = [
+    "linker",
+]
 // - Fake library that avoids namespace issues and gives some warnings for nosy apps.
 art_runtime_fake_native_shared_libs = [
      // FIXME: Does not work as-is, because `libart_fake` is defined in libart_fake/Android.mk,
@@ -51,6 +55,7 @@
     "libopenjdkjvmd",
     "libopenjdkjvmtid",
     "libadbconnectiond",
+    "libjavacrypto",
 ]
 
 // Files associated with bionic / managed core library time zone APIs.
@@ -113,7 +118,8 @@
         both: {
             // TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
             // (see `symlink_preferred_arch` in art/dalvikvm/Android.bp).
-            binaries: art_runtime_base_binaries_both,
+            binaries: art_runtime_base_binaries_both
+                + bionic_binaries_both,
         },
         prefer32: {
             binaries: art_runtime_base_binaries_prefer32,
@@ -142,7 +148,8 @@
         both: {
             // TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
             // (see `symlink_preferred_arch` in art/dalvikvm/Android.bp).
-            binaries: art_runtime_base_binaries_both,
+            binaries: art_runtime_base_binaries_both
+                + bionic_binaries_both,
         },
         prefer32: {
             binaries: art_runtime_base_binaries_prefer32
diff --git a/build/apex/ld.config.txt b/build/apex/ld.config.txt
index ac4d1eb..014b115 100644
--- a/build/apex/ld.config.txt
+++ b/build/apex/ld.config.txt
@@ -1 +1,31 @@
-# TODO: Write me.
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Bionic loader config file for the Runtime APEX.
+#
+# There are no versioned APEX paths here - this APEX module does not support
+# having several versions mounted.
+
+dir.runtime = /apex/com.android.runtime/bin/
+
+[runtime]
+additional.namespaces = platform
+
+# Keep in sync with runtime namespace in /system/etc/ld.config.txt.
+namespace.default.isolated = true
+namespace.default.search.paths = /apex/com.android.runtime/${LIB}
+# odex files are in /system/framework. dalvikvm has to be able to dlopen the
+# files for CTS.
+namespace.default.permitted.paths = /system/framework
+namespace.default.links = platform
+# TODO(b/119867084): Restrict fallback to platform namespace to PALette library.
+namespace.default.link.platform.allow_all_shared_libs = true
+
+# Keep in sync with default namespace in /system/etc/ld.config.txt.
+namespace.platform.isolated = true
+namespace.platform.search.paths = /system/${LIB}
+namespace.platform.links = default
+namespace.platform.link.default.shared_libs  = libc.so:libdl.so:libm.so
+namespace.platform.link.default.shared_libs += libart.so:libartd.so
+namespace.platform.link.default.shared_libs += libnativebridge.so
+namespace.platform.link.default.shared_libs += libnativehelper.so
+namespace.platform.link.default.shared_libs += libnativeloader.so
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index 581edaa..658bdb3 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -53,13 +53,13 @@
     dwarf::WriteCIE(is64bit, dwarf::Reg(8), initial_opcodes, kCFIFormat, &debug_frame_data_);
     std::vector<uintptr_t> debug_frame_patches;
     dwarf::WriteFDE(is64bit,
-                    /* section_address */ 0,
-                    /* cie_address */ 0,
-                    /* code_address */ 0,
+                    /* section_address= */ 0,
+                    /* cie_address= */ 0,
+                    /* code_address= */ 0,
                     actual_asm.size(),
                     actual_cfi,
                     kCFIFormat,
-                    /* buffer_address */ 0,
+                    /* buffer_address= */ 0,
                     &debug_frame_data_,
                     &debug_frame_patches);
     ReformatCfi(Objdump(false, "-W"), &lines);
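Most of the argument-comment churn in this change converts trailing `value /* name */` comments to the `/* name= */ value` form. A minimal sketch of the convention, using a hypothetical function; the `name=` form is what tooling such as clang-tidy's bugprone-argument-comment check can verify against the declared parameter name:

    // Hypothetical declaration, for illustration only.
    void Configure(bool verbose, int retries);

    void Example() {
      // Comment-before-value with a trailing '=' ties each comment to a
      // specific parameter, so a stale or misplaced comment can be flagged.
      Configure(/* verbose= */ true, /* retries= */ 3);
    }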
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index be6da71..72afb98 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -197,7 +197,7 @@
   compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
                                             compiler_kind_,
                                             number_of_threads_,
-                                            /* swap_fd */ -1));
+                                            /* swap_fd= */ -1));
 }
 
 void CommonCompilerTest::SetUpRuntimeOptions(RuntimeOptions* options) {
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 75790c9..e92777f 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -136,7 +136,7 @@
   // This affects debug information generated at link time.
   void MarkAsIntrinsic() {
     DCHECK(!IsIntrinsic());
-    SetPackedField<IsIntrinsicField>(/* value */ true);
+    SetPackedField<IsIntrinsicField>(/* value= */ true);
   }
 
   ArrayRef<const uint8_t> GetVmapTable() const;
diff --git a/compiler/debug/dwarf/dwarf_test.cc b/compiler/debug/dwarf/dwarf_test.cc
index 933034f..6512314 100644
--- a/compiler/debug/dwarf/dwarf_test.cc
+++ b/compiler/debug/dwarf/dwarf_test.cc
@@ -334,7 +334,7 @@
 
   std::vector<uintptr_t> debug_info_patches;
   std::vector<uintptr_t> expected_patches = { 16, 20, 29, 33, 42, 46 };
-  dwarf::WriteDebugInfoCU(0 /* debug_abbrev_offset */, info,
+  dwarf::WriteDebugInfoCU(/* debug_abbrev_offset= */ 0, info,
                           0, &debug_info_data_, &debug_info_patches);
 
   EXPECT_EQ(expected_patches, debug_info_patches);
diff --git a/compiler/debug/dwarf/headers.h b/compiler/debug/dwarf/headers.h
index 28f1084..4a27178 100644
--- a/compiler/debug/dwarf/headers.h
+++ b/compiler/debug/dwarf/headers.h
@@ -107,7 +107,9 @@
   } else {
     DCHECK(format == DW_DEBUG_FRAME_FORMAT);
     // Relocate code_address if it has absolute value.
-    patch_locations->push_back(buffer_address + buffer->size() - section_address);
+    if (patch_locations != nullptr) {
+      patch_locations->push_back(buffer_address + buffer->size() - section_address);
+    }
   }
   if (is64bit) {
     writer.PushUint64(code_address);
@@ -122,6 +124,30 @@
   writer.UpdateUint32(fde_header_start, writer.data()->size() - fde_header_start - 4);
 }
 
+// Read a single FDE entry from 'data' (which is advanced).
+template<typename Addr>
+bool ReadFDE(const uint8_t** data, Addr* addr, Addr* size, ArrayRef<const uint8_t>* opcodes) {
+  struct Header {
+    uint32_t length;
+    int32_t cie_pointer;
+    Addr addr;
+    Addr size;
+    uint8_t augmentation;
+    uint8_t opcodes[];
+  } PACKED(1);
+  const Header* header = reinterpret_cast<const Header*>(*data);
+  const size_t length = 4 + header->length;
+  *data += length;
+  if (header->cie_pointer == -1) {
+    return false;  // Not an FDE entry.
+  }
+  DCHECK_EQ(header->cie_pointer, 0);  // Expects single CIE. Assumes DW_DEBUG_FRAME_FORMAT.
+  *addr = header->addr;
+  *size = header->size;
+  *opcodes = ArrayRef<const uint8_t>(header->opcodes, length - offsetof(Header, opcodes));
+  return true;
+}
+
 // Write compilation unit (CU) to .debug_info section.
 template<typename Vector>
 void WriteDebugInfoCU(uint32_t debug_abbrev_offset,
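A minimal sketch of how ReadFDE is meant to be driven (it mirrors the loop added in elf_debug_reader.h below); `debug_frame_begin` and `debug_frame_end` are assumed to delimit a .debug_frame section in DW_DEBUG_FRAME_FORMAT with a single CIE:

    const uint8_t* data = debug_frame_begin;
    while (data < debug_frame_end) {
      uint32_t addr, size;  // 32-bit address type; use uint64_t for Elf64.
      ArrayRef<const uint8_t> opcodes;
      if (dwarf::ReadFDE<uint32_t>(&data, &addr, &size, &opcodes)) {
        // 'addr' and 'size' describe the covered code range; 'opcodes' holds
        // the CFI instructions for that range.
      }
      // ReadFDE advances 'data' past the entry even when it returns false,
      // i.e. when the entry was the CIE rather than an FDE.
    }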
diff --git a/compiler/debug/elf_debug_frame_writer.h b/compiler/debug/elf_debug_frame_writer.h
index 27b70c8..e0116c6 100644
--- a/compiler/debug/elf_debug_frame_writer.h
+++ b/compiler/debug/elf_debug_frame_writer.h
@@ -182,7 +182,7 @@
   std::vector<const MethodDebugInfo*> sorted_method_infos;
   sorted_method_infos.reserve(method_infos.size());
   for (size_t i = 0; i < method_infos.size(); i++) {
-    if (!method_infos[i].cfi.empty() && !method_infos[i].deduped) {
+    if (!method_infos[i].deduped) {
       sorted_method_infos.push_back(&method_infos[i]);
     }
   }
@@ -222,7 +222,6 @@
     buffer.clear();
     for (const MethodDebugInfo* mi : sorted_method_infos) {
       DCHECK(!mi->deduped);
-      DCHECK(!mi->cfi.empty());
       const Elf_Addr code_address = mi->code_address +
           (mi->is_code_address_text_relative ? builder->GetText()->GetAddress() : 0);
       if (format == dwarf::DW_EH_FRAME_FORMAT) {
diff --git a/compiler/debug/elf_debug_reader.h b/compiler/debug/elf_debug_reader.h
new file mode 100644
index 0000000..91b1b3e
--- /dev/null
+++ b/compiler/debug/elf_debug_reader.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_READER_H_
+#define ART_COMPILER_DEBUG_ELF_DEBUG_READER_H_
+
+#include "base/array_ref.h"
+#include "debug/dwarf/headers.h"
+#include "elf.h"
+#include "xz_utils.h"
+
+namespace art {
+namespace debug {
+
+// Trivial ELF file reader.
+//
+// It is the bare minimum needed to read mini-debug-info symbols for unwinding.
+// We use it to merge JIT mini-debug-infos together or to prune them after GC.
+// The consumed ELF file comes from ART JIT.
+template <typename ElfTypes, typename VisitSym, typename VisitFde>
+static void ReadElfSymbols(const uint8_t* elf, VisitSym visit_sym, VisitFde visit_fde) {
+  // Note that the input buffer might be misaligned.
+  typedef typename ElfTypes::Ehdr ALIGNED(1) Elf_Ehdr;
+  typedef typename ElfTypes::Shdr ALIGNED(1) Elf_Shdr;
+  typedef typename ElfTypes::Sym ALIGNED(1) Elf_Sym;
+  typedef typename ElfTypes::Addr ALIGNED(1) Elf_Addr;
+
+  // Read and check the elf header.
+  const Elf_Ehdr* header = reinterpret_cast<const Elf_Ehdr*>(elf);
+  CHECK(header->checkMagic());
+
+  // Find sections that we are interested in.
+  const Elf_Shdr* sections = reinterpret_cast<const Elf_Shdr*>(elf + header->e_shoff);
+  const Elf_Shdr* strtab = nullptr;
+  const Elf_Shdr* symtab = nullptr;
+  const Elf_Shdr* debug_frame = nullptr;
+  const Elf_Shdr* gnu_debugdata = nullptr;
+  for (size_t i = 1 /* skip null section */; i < header->e_shnum; i++) {
+    const Elf_Shdr* section = sections + i;
+    const char* name = reinterpret_cast<const char*>(
+        elf + sections[header->e_shstrndx].sh_offset + section->sh_name);
+    if (strcmp(name, ".strtab") == 0) {
+      strtab = section;
+    } else if (strcmp(name, ".symtab") == 0) {
+      symtab = section;
+    } else if (strcmp(name, ".debug_frame") == 0) {
+      debug_frame = section;
+    } else if (strcmp(name, ".gnu_debugdata") == 0) {
+      gnu_debugdata = section;
+    }
+  }
+
+  // Visit symbols.
+  if (symtab != nullptr && strtab != nullptr) {
+    const Elf_Sym* symbols = reinterpret_cast<const Elf_Sym*>(elf + symtab->sh_offset);
+    DCHECK_EQ(symtab->sh_entsize, sizeof(Elf_Sym));
+    size_t count = symtab->sh_size / sizeof(Elf_Sym);
+    for (size_t i = 1 /* skip null symbol */; i < count; i++) {
+      Elf_Sym symbol = symbols[i];
+      if (symbol.getBinding() != STB_LOCAL) {  // Ignore local symbols (e.g. "$t").
+        const uint8_t* name = elf + strtab->sh_offset + symbol.st_name;
+        visit_sym(symbol, reinterpret_cast<const char*>(name));
+      }
+    }
+  }
+
+  // Visit CFI (unwind) data.
+  if (debug_frame != nullptr) {
+    const uint8_t* data = elf + debug_frame->sh_offset;
+    const uint8_t* end = data + debug_frame->sh_size;
+    while (data < end) {
+      Elf_Addr addr, size;
+      ArrayRef<const uint8_t> opcodes;
+      if (dwarf::ReadFDE<Elf_Addr>(&data, &addr, &size, &opcodes)) {
+        visit_fde(addr, size, opcodes);
+      }
+    }
+  }
+
+  // Process embedded compressed ELF file.
+  if (gnu_debugdata != nullptr) {
+    ArrayRef<const uint8_t> compressed(elf + gnu_debugdata->sh_offset, gnu_debugdata->sh_size);
+    std::vector<uint8_t> decompressed;
+    XzDecompress(compressed, &decompressed);
+    ReadElfSymbols<ElfTypes>(decompressed.data(), visit_sym, visit_fde);
+  }
+}
+
+}  // namespace debug
+}  // namespace art
+#endif  // ART_COMPILER_DEBUG_ELF_DEBUG_READER_H_
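For context, a sketch of driving this reader with illustrative lambdas; `elf_file_bytes` is an assumed pointer to an in-memory ELF file produced by the JIT (the debug-build verification added to elf_debug_writer.cc below follows the same pattern):

    using ElfTypes = ElfRuntimeTypes;  // Matches the target pointer size.
    size_t num_syms = 0;
    size_t num_fdes = 0;
    debug::ReadElfSymbols<ElfTypes>(
        elf_file_bytes,
        [&](ElfTypes::Sym /* sym */, const char* /* name */) { num_syms++; },
        [&](ElfTypes::Addr /* addr */, ElfTypes::Addr /* size */,
            ArrayRef<const uint8_t> /* opcodes */) { num_fdes++; });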
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index 1ecb1d8e..0f2d73e 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -21,12 +21,14 @@
 #include <vector>
 
 #include "base/array_ref.h"
+#include "base/stl_util.h"
 #include "debug/dwarf/dwarf_constants.h"
 #include "debug/elf_compilation_unit.h"
 #include "debug/elf_debug_frame_writer.h"
 #include "debug/elf_debug_info_writer.h"
 #include "debug/elf_debug_line_writer.h"
 #include "debug/elf_debug_loc_writer.h"
+#include "debug/elf_debug_reader.h"
 #include "debug/elf_symtab_writer.h"
 #include "debug/method_debug_info.h"
 #include "debug/xz_utils.h"
@@ -46,7 +48,7 @@
                     dwarf::CFIFormat cfi_format,
                     bool write_oat_patches) {
   // Write .strtab and .symtab.
-  WriteDebugSymbols(builder, false /* mini-debug-info */, debug_info);
+  WriteDebugSymbols(builder, /* mini_debug_info= */ false, debug_info);
 
   // Write .debug_frame.
   WriteCFISection(builder, debug_info.compiled_methods, cfi_format, write_oat_patches);
@@ -123,17 +125,17 @@
   linker::VectorOutputStream out("Mini-debug-info ELF file", &buffer);
   std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
       new linker::ElfBuilder<ElfTypes>(isa, features, &out));
-  builder->Start(false /* write_program_headers */);
+  builder->Start(/* write_program_headers= */ false);
   // Mirror ELF sections as NOBITS since the added symbols will reference them.
   builder->GetText()->AllocateVirtualMemory(text_section_address, text_section_size);
   if (dex_section_size != 0) {
     builder->GetDex()->AllocateVirtualMemory(dex_section_address, dex_section_size);
   }
+  WriteDebugSymbols(builder.get(), /* mini_debug_info= */ true, debug_info);
+  WriteDebugSymbols(builder.get(), /* mini-debug-info= */ true, debug_info);
   WriteCFISection(builder.get(),
                   debug_info.compiled_methods,
                   dwarf::DW_DEBUG_FRAME_FORMAT,
-                  false /* write_oat_paches */);
+                  /* write_oat_patches= */ false);
   builder->End();
   CHECK(builder->Good());
   std::vector<uint8_t> compressed_buffer;
@@ -185,27 +187,165 @@
   std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
       new linker::ElfBuilder<ElfTypes>(isa, features, &out));
   // No program headers since the ELF file is not linked and has no allocated sections.
-  builder->Start(false /* write_program_headers */);
+  builder->Start(/* write_program_headers= */ false);
   builder->GetText()->AllocateVirtualMemory(method_info.code_address, method_info.code_size);
   if (mini_debug_info) {
     // The compression is great help for multiple methods but it is not worth it for a
     // single method due to the overheads so skip the compression here for performance.
-    WriteDebugSymbols(builder.get(), true /* mini-debug-info */, debug_info);
+    WriteDebugSymbols(builder.get(), /* mini_debug_info= */ true, debug_info);
     WriteCFISection(builder.get(),
                     debug_info.compiled_methods,
                     dwarf::DW_DEBUG_FRAME_FORMAT,
-                    false /* write_oat_paches */);
+                    /* write_oat_patches= */ false);
   } else {
     WriteDebugInfo(builder.get(),
                    debug_info,
                    dwarf::DW_DEBUG_FRAME_FORMAT,
-                   false /* write_oat_patches */);
+                   /* write_oat_patches= */ false);
   }
   builder->End();
   CHECK(builder->Good());
+  // Verify the ELF file by reading it back using the trivial reader.
+  if (kIsDebugBuild) {
+    using Elf_Sym = typename ElfTypes::Sym;
+    using Elf_Addr = typename ElfTypes::Addr;
+    size_t num_syms = 0;
+    size_t num_cfis = 0;
+    ReadElfSymbols<ElfTypes>(
+        buffer.data(),
+        [&](Elf_Sym sym, const char*) {
+          DCHECK_EQ(sym.st_value, method_info.code_address + CompiledMethod::CodeDelta(isa));
+          DCHECK_EQ(sym.st_size, method_info.code_size);
+          num_syms++;
+        },
+        [&](Elf_Addr addr, Elf_Addr size, ArrayRef<const uint8_t> opcodes) {
+          DCHECK_EQ(addr, method_info.code_address);
+          DCHECK_EQ(size, method_info.code_size);
+          DCHECK_GE(opcodes.size(), method_info.cfi.size());
+          DCHECK_EQ(memcmp(opcodes.data(), method_info.cfi.data(), method_info.cfi.size()), 0);
+          num_cfis++;
+        });
+    DCHECK_EQ(num_syms, 1u);
+    DCHECK_EQ(num_cfis, 1u);
+  }
   return buffer;
 }
 
+// Combine several mini-debug-info ELF files into one, while filtering some symbols.
+std::vector<uint8_t> PackElfFileForJIT(
+    InstructionSet isa,
+    const InstructionSetFeatures* features,
+    std::vector<const uint8_t*>& added_elf_files,
+    std::vector<const void*>& removed_symbols,
+    /*out*/ size_t* num_symbols) {
+  using ElfTypes = ElfRuntimeTypes;
+  using Elf_Addr = typename ElfTypes::Addr;
+  using Elf_Sym = typename ElfTypes::Sym;
+  CHECK_EQ(sizeof(Elf_Addr), static_cast<size_t>(GetInstructionSetPointerSize(isa)));
+  const bool is64bit = Is64BitInstructionSet(isa);
+  auto is_removed_symbol = [&removed_symbols](Elf_Addr addr) {
+    const void* code_ptr = reinterpret_cast<const void*>(addr);
+    return std::binary_search(removed_symbols.begin(), removed_symbols.end(), code_ptr);
+  };
+  uint64_t min_address = std::numeric_limits<uint64_t>::max();
+  uint64_t max_address = 0;
+
+  // Produce the inner ELF file.
+  // It will contain the symbols (.symtab) and unwind information (.debug_frame).
+  std::vector<uint8_t> inner_elf_file;
+  {
+    inner_elf_file.reserve(1 * KB);  // Approximate size of ELF file with a single symbol.
+    linker::VectorOutputStream out("Mini-debug-info ELF file for JIT", &inner_elf_file);
+    std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
+        new linker::ElfBuilder<ElfTypes>(isa, features, &out));
+    builder->Start(/*write_program_headers=*/ false);
+    auto* text = builder->GetText();
+    auto* strtab = builder->GetStrTab();
+    auto* symtab = builder->GetSymTab();
+    auto* debug_frame = builder->GetDebugFrame();
+    std::deque<Elf_Sym> symbols;
+    std::vector<uint8_t> debug_frame_buffer;
+    WriteCIE(isa, dwarf::DW_DEBUG_FRAME_FORMAT, &debug_frame_buffer);
+
+    // Write symbols names. All other data is buffered.
+    strtab->Start();
+    strtab->Write("");  // strtab should start with empty string.
+    for (const uint8_t* added_elf_file : added_elf_files) {
+      ReadElfSymbols<ElfTypes>(
+          added_elf_file,
+          [&](Elf_Sym sym, const char* name) {
+              if (is_removed_symbol(sym.st_value)) {
+                return;
+              }
+              sym.st_name = strtab->Write(name);
+              symbols.push_back(sym);
+              min_address = std::min<uint64_t>(min_address, sym.st_value);
+              max_address = std::max<uint64_t>(max_address, sym.st_value + sym.st_size);
+          },
+          [&](Elf_Addr addr, Elf_Addr size, ArrayRef<const uint8_t> opcodes) {
+              if (is_removed_symbol(addr)) {
+                return;
+              }
+              WriteFDE(is64bit,
+                       /*section_address=*/ 0,
+                       /*cie_address=*/ 0,
+                       addr,
+                       size,
+                       opcodes,
+                       dwarf::DW_DEBUG_FRAME_FORMAT,
+                       debug_frame_buffer.size(),
+                       &debug_frame_buffer,
+                       /*patch_locations=*/ nullptr);
+          });
+    }
+    strtab->End();
+
+    // Create .text covering the code range. Needed for gdb to find the symbols.
+    if (max_address > min_address) {
+      text->AllocateVirtualMemory(min_address, max_address - min_address);
+    }
+
+    // Add the symbols.
+    *num_symbols = symbols.size();
+    for (; !symbols.empty(); symbols.pop_front()) {
+      symtab->Add(symbols.front(), text);
+    }
+    symtab->WriteCachedSection();
+
+    // Add the CFI/unwind section.
+    debug_frame->Start();
+    debug_frame->WriteFully(debug_frame_buffer.data(), debug_frame_buffer.size());
+    debug_frame->End();
+
+    builder->End();
+    CHECK(builder->Good());
+  }
+
+  // Produce the outer ELF file.
+  // It contains only the inner ELF file compressed as .gnu_debugdata section.
+  // This extra wrapping is not necessary but the compression saves space.
+  std::vector<uint8_t> outer_elf_file;
+  {
+    std::vector<uint8_t> gnu_debugdata;
+    gnu_debugdata.reserve(inner_elf_file.size() / 4);
+    XzCompress(ArrayRef<const uint8_t>(inner_elf_file), &gnu_debugdata);
+
+    outer_elf_file.reserve(KB + gnu_debugdata.size());
+    linker::VectorOutputStream out("Mini-debug-info ELF file for JIT", &outer_elf_file);
+    std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
+        new linker::ElfBuilder<ElfTypes>(isa, features, &out));
+    builder->Start(/*write_program_headers=*/ false);
+    if (max_address > min_address) {
+      builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address);
+    }
+    builder->WriteSection(".gnu_debugdata", &gnu_debugdata);
+    builder->End();
+    CHECK(builder->Good());
+  }
+
+  return outer_elf_file;
+}
+
 std::vector<uint8_t> WriteDebugElfFileForClasses(
     InstructionSet isa,
     const InstructionSetFeatures* features,
@@ -219,12 +359,12 @@
   std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
       new linker::ElfBuilder<ElfTypes>(isa, features, &out));
   // No program headers since the ELF file is not linked and has no allocated sections.
-  builder->Start(false /* write_program_headers */);
+  builder->Start(/* write_program_headers= */ false);
   ElfDebugInfoWriter<ElfTypes> info_writer(builder.get());
   info_writer.Start();
   ElfCompilationUnitWriter<ElfTypes> cu_writer(&info_writer);
   cu_writer.Write(types);
-  info_writer.End(false /* write_oat_patches */);
+  info_writer.End(/* write_oat_patches= */ false);
 
   builder->End();
   CHECK(builder->Good());
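A usage sketch for the new entry point; `elf_a`, `elf_b`, `collected_code_ptr`, and `features` are illustrative names. One precondition worth calling out: `removed_symbols` must be sorted, since the filter above relies on std::binary_search:

    // Pack two per-method ELF files, dropping a method reclaimed by the GC.
    std::vector<const uint8_t*> added = { elf_a.data(), elf_b.data() };
    std::vector<const void*> removed = { collected_code_ptr };
    std::sort(removed.begin(), removed.end());  // Required by binary_search.
    size_t num_symbols = 0;
    std::vector<uint8_t> packed = debug::PackElfFileForJIT(
        kRuntimeISA, features, added, removed, &num_symbols);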
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index 8ad0c42..85ab356 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -56,6 +56,13 @@
     bool mini_debug_info,
     const MethodDebugInfo& method_info);
 
+std::vector<uint8_t> PackElfFileForJIT(
+    InstructionSet isa,
+    const InstructionSetFeatures* features,
+    std::vector<const uint8_t*>& added_elf_files,
+    std::vector<const void*>& removed_symbols,
+    /*out*/ size_t* num_symbols);
+
 std::vector<uint8_t> WriteDebugElfFileForClasses(
     InstructionSet isa,
     const InstructionSetFeatures* features,
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index c124ef5..cf52dd9 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -473,7 +473,7 @@
           method_idx,
           unit_.GetDexCache(),
           unit_.GetClassLoader(),
-          /* referrer */ nullptr,
+          /* referrer= */ nullptr,
           kVirtual);
 
   if (UNLIKELY(resolved_method == nullptr)) {
diff --git a/compiler/dex/dex_to_dex_decompiler_test.cc b/compiler/dex/dex_to_dex_decompiler_test.cc
index b055416..1f04546 100644
--- a/compiler/dex/dex_to_dex_decompiler_test.cc
+++ b/compiler/dex/dex_to_dex_decompiler_test.cc
@@ -95,7 +95,7 @@
         optimizer::ArtDecompileDEX(*updated_dex_file,
                                    *accessor.GetCodeItem(method),
                                    table,
-                                   /* decompile_return_instruction */ true);
+                                   /* decompile_return_instruction= */ true);
       }
     }
 
diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc
index 183173b..ba2ebd9 100644
--- a/compiler/dex/inline_method_analyser.cc
+++ b/compiler/dex/inline_method_analyser.cc
@@ -216,7 +216,7 @@
   DCHECK(IsInstructionIPut(new_iput->Opcode()));
   uint32_t field_index = new_iput->VRegC_22c();
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  ArtField* field = class_linker->LookupResolvedField(field_index, method, /* is_static */ false);
+  ArtField* field = class_linker->LookupResolvedField(field_index, method, /* is_static= */ false);
   if (UNLIKELY(field == nullptr)) {
     return false;
   }
@@ -228,7 +228,7 @@
     }
     ArtField* f = class_linker->LookupResolvedField(iputs[old_pos].field_index,
                                                     method,
-                                                    /* is_static */ false);
+                                                    /* is_static= */ false);
     DCHECK(f != nullptr);
     if (f == field) {
       auto back_it = std::copy(iputs + old_pos + 1, iputs + arraysize(iputs), iputs + old_pos);
@@ -713,7 +713,7 @@
   }
   ObjPtr<mirror::DexCache> dex_cache = method->GetDexCache();
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  ArtField* field = class_linker->LookupResolvedField(field_idx, method, /* is_static */ false);
+  ArtField* field = class_linker->LookupResolvedField(field_idx, method, /* is_static= */ false);
   if (field == nullptr || field->IsStatic()) {
     return false;
   }
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 5a34efb..6bd5fe8 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -112,7 +112,7 @@
   // which have no verifier error, nor has methods that we know will throw
   // at runtime.
   std::unique_ptr<VerifiedMethod> verified_method = std::make_unique<VerifiedMethod>(
-      /* encountered_error_types */ 0, /* has_runtime_throw */ false);
+      /* encountered_error_types= */ 0, /* has_runtime_throw= */ false);
   if (atomic_verified_methods_.Insert(ref,
                                       /*expected*/ nullptr,
                                       verified_method.get()) ==
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index f2da3ff..54f216a 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -82,7 +82,7 @@
           method_verifier->ResolveCheckedClass(dex::TypeIndex(inst.VRegB_21c()));
       // Pass null for the method verifier to not record the VerifierDeps dependency
       // if the types are not assignable.
-      if (cast_type.IsStrictlyAssignableFrom(reg_type, /* method_verifier */ nullptr)) {
+      if (cast_type.IsStrictlyAssignableFrom(reg_type, /* verifier= */ nullptr)) {
         // The types are assignable, we record that dependency in the VerifierDeps so
         // that if this changes after OTA, we will re-verify again.
         // We check if reg_type has a class, as the verifier may have inferred it's
@@ -92,8 +92,8 @@
           verifier::VerifierDeps::MaybeRecordAssignability(method_verifier->GetDexFile(),
                                                            cast_type.GetClass(),
                                                            reg_type.GetClass(),
-                                                           /* strict */ true,
-                                                           /* assignable */ true);
+                                                           /* is_strict= */ true,
+                                                           /* is_assignable= */ true);
         }
         if (safe_cast_set_ == nullptr) {
           safe_cast_set_.reset(new SafeCastSet());
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 9fac2bc..05eacd8 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -23,7 +23,7 @@
 namespace art {
 
 TEST(CompiledMethodStorage, Deduplicate) {
-  CompiledMethodStorage storage(/* swap_fd */ -1);
+  CompiledMethodStorage storage(/* swap_fd= */ -1);
 
   ASSERT_TRUE(storage.DedupeEnabled());  // The default.
 
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 0039be0..36beb59 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -112,19 +112,7 @@
 class CompilerDriver::AOTCompilationStats {
  public:
   AOTCompilationStats()
-      : stats_lock_("AOT compilation statistics lock"),
-        resolved_instance_fields_(0), unresolved_instance_fields_(0),
-        resolved_local_static_fields_(0), resolved_static_fields_(0), unresolved_static_fields_(0),
-        type_based_devirtualization_(0),
-        safe_casts_(0), not_safe_casts_(0) {
-    for (size_t i = 0; i <= kMaxInvokeType; i++) {
-      resolved_methods_[i] = 0;
-      unresolved_methods_[i] = 0;
-      virtual_made_direct_[i] = 0;
-      direct_calls_to_boot_[i] = 0;
-      direct_methods_to_boot_[i] = 0;
-    }
-  }
+      : stats_lock_("AOT compilation statistics lock") {}
 
   void Dump() {
     DumpStat(resolved_instance_fields_, unresolved_instance_fields_, "instance fields resolved");
@@ -141,6 +129,16 @@
              type_based_devirtualization_,
              "virtual/interface calls made direct based on type information");
 
+    const size_t total = std::accumulate(
+        class_status_count_,
+        class_status_count_ + static_cast<size_t>(ClassStatus::kLast) + 1,
+        0u);
+    for (size_t i = 0; i <= static_cast<size_t>(ClassStatus::kLast); ++i) {
+      std::ostringstream oss;
+      oss << "classes with status " << static_cast<ClassStatus>(i);
+      DumpStat(class_status_count_[i], total - class_status_count_[i], oss.str().c_str());
+    }
+
     for (size_t i = 0; i <= kMaxInvokeType; i++) {
       std::ostringstream oss;
       oss << static_cast<InvokeType>(i) << " methods were AOT resolved";
@@ -219,26 +217,34 @@
     not_safe_casts_++;
   }
 
+  // Register a class status.
+  void AddClassStatus(ClassStatus status) REQUIRES(!stats_lock_) {
+    STATS_LOCK();
+    ++class_status_count_[static_cast<size_t>(status)];
+  }
+
  private:
   Mutex stats_lock_;
 
-  size_t resolved_instance_fields_;
-  size_t unresolved_instance_fields_;
+  size_t resolved_instance_fields_ = 0u;
+  size_t unresolved_instance_fields_ = 0u;
 
-  size_t resolved_local_static_fields_;
-  size_t resolved_static_fields_;
-  size_t unresolved_static_fields_;
+  size_t resolved_local_static_fields_ = 0u;
+  size_t resolved_static_fields_ = 0u;
+  size_t unresolved_static_fields_ = 0u;
   // Type based devirtualization for invoke interface and virtual.
-  size_t type_based_devirtualization_;
+  size_t type_based_devirtualization_ = 0u;
 
-  size_t resolved_methods_[kMaxInvokeType + 1];
-  size_t unresolved_methods_[kMaxInvokeType + 1];
-  size_t virtual_made_direct_[kMaxInvokeType + 1];
-  size_t direct_calls_to_boot_[kMaxInvokeType + 1];
-  size_t direct_methods_to_boot_[kMaxInvokeType + 1];
+  size_t resolved_methods_[kMaxInvokeType + 1] = {};
+  size_t unresolved_methods_[kMaxInvokeType + 1] = {};
+  size_t virtual_made_direct_[kMaxInvokeType + 1] = {};
+  size_t direct_calls_to_boot_[kMaxInvokeType + 1] = {};
+  size_t direct_methods_to_boot_[kMaxInvokeType + 1] = {};
 
-  size_t safe_casts_;
-  size_t not_safe_casts_;
+  size_t safe_casts_ = 0u;
+  size_t not_safe_casts_ = 0u;
+
+  size_t class_status_count_[static_cast<size_t>(ClassStatus::kLast) + 1] = {};
 
   DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats);
 };
@@ -802,7 +808,7 @@
           ObjPtr<mirror::Class> klass =
               class_linker->LookupResolvedType(type_index,
                                                dex_cache.Get(),
-                                               /* class_loader */ nullptr);
+                                               /* class_loader= */ nullptr);
           CHECK(klass != nullptr) << descriptor << " should have been previously resolved.";
           // Now assign the bitstring if the class is not final. Keep this in sync with sharpening.
           if (!klass->IsFinal()) {
@@ -1191,7 +1197,7 @@
   // Visitor for VisitReferences.
   void operator()(ObjPtr<mirror::Object> object,
                   MemberOffset field_offset,
-                  bool /* is_static */) const
+                  bool is_static ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
     if (ref != nullptr) {
@@ -1231,8 +1237,15 @@
     bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
       std::string temp;
       StringPiece name(klass->GetDescriptor(&temp));
-      if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
-        data_->image_classes_.push_back(hs_.NewHandle(klass));
+      auto it = data_->image_class_descriptors_->find(name);
+      if (it != data_->image_class_descriptors_->end()) {
+        if (LIKELY(klass->IsResolved())) {
+          data_->image_classes_.push_back(hs_.NewHandle(klass));
+        } else {
+          DCHECK(klass->IsErroneousUnresolved());
+          VLOG(compiler) << "Removing unresolved class from image classes: " << name;
+          data_->image_class_descriptors_->erase(it);
+        }
       } else {
         // Check whether it is initialized and has a clinit. They must be kept, too.
         if (klass->IsInitialized() && klass->FindClassInitializer(
@@ -1354,7 +1367,7 @@
   Handle<mirror::DexCache> dex_cache(mUnit->GetDexCache());
   {
     Handle<mirror::ClassLoader> class_loader = mUnit->GetClassLoader();
-    resolved_field = ResolveField(soa, dex_cache, class_loader, field_idx, /* is_static */ false);
+    resolved_field = ResolveField(soa, dex_cache, class_loader, field_idx, /* is_static= */ false);
     referrer_class = resolved_field != nullptr
         ? ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit) : nullptr;
   }
@@ -2095,8 +2108,11 @@
     Handle<mirror::Class> klass(
         hs.NewHandle(manager_->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
 
-    if (klass != nullptr && !SkipClass(manager_->GetClassLoader(), dex_file, klass.Get())) {
-      TryInitializeClass(klass, class_loader);
+    if (klass != nullptr) {
+      if (!SkipClass(manager_->GetClassLoader(), dex_file, klass.Get())) {
+        TryInitializeClass(klass, class_loader);
+      }
+      manager_->GetCompiler()->stats_->AddClassStatus(klass->GetStatus());
     }
     // Clear any class not found or verification exceptions.
     soa.Self()->ClearException();
@@ -2531,7 +2547,7 @@
   }
   if (GetCompilerOptions().IsBootImage()) {
     // Prune garbage objects created during aborted transactions.
-    Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ true);
+    Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ true);
   }
 }
 
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 7c0fc64..025a632 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -378,6 +378,7 @@
   friend class CommonCompilerTest;
   friend class CompileClassVisitor;
   friend class DexToDexDecompilerTest;
+  friend class InitializeClassVisitor;
   friend class verifier::VerifierDepsTest;
   DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
 };
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index 80c0a68..f0f179c 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -187,14 +187,14 @@
   }
 
   fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc(
-      method_g_, kDexPc, /* is_catch_handler */ false));  // return pc
+      method_g_, kDexPc, /* is_for_catch_handler= */ false));  // return pc
 
   // Create/push fake 16byte stack frame for method g
   fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
   fake_stack.push_back(0);
   fake_stack.push_back(0);
   fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc(
-      method_g_, kDexPc, /* is_catch_handler */ false));  // return pc
+      method_g_, kDexPc, /* is_for_catch_handler= */ false));  // return pc
 
   // Create/push fake 16byte stack frame for method f
   fake_stack.push_back(reinterpret_cast<uintptr_t>(method_f_));
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index e57bbfa..0d35fec 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -146,7 +146,10 @@
     // (which would have been otherwise used as identifier to remove it later).
     AddNativeDebugInfoForJit(Thread::Current(),
                              /*code_ptr=*/ nullptr,
-                             elf_file);
+                             elf_file,
+                             debug::PackElfFileForJIT,
+                             compiler_options.GetInstructionSet(),
+                             compiler_options.GetInstructionSetFeatures());
   }
 }
 
@@ -169,8 +172,8 @@
   compiler_driver_.reset(new CompilerDriver(
       compiler_options_.get(),
       Compiler::kOptimizing,
-      /* thread_count */ 1,
-      /* swap_fd */ -1));
+      /* thread_count= */ 1,
+      /* swap_fd= */ -1));
   // Disable dedupe so we can remove compiled methods.
   compiler_driver_->SetDedupeEnabled(false);
 }
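AddNativeDebugInfoForJit now takes the packing function as a parameter, presumably so the runtime can repack JIT mini-debug-info without depending on the compiler directly. Judging from the PackElfFileForJIT declaration above, the expected callback shape would be as follows (the alias name is an assumption, not taken from this change):

    // Assumed callback type, matching the PackElfFileForJIT signature above.
    using PackElfFileForJITFunction = std::vector<uint8_t> (*)(
        InstructionSet isa,
        const InstructionSetFeatures* features,
        std::vector<const uint8_t*>& added_elf_files,
        std::vector<const void*>& removed_symbols,
        /*out*/ size_t* num_symbols);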
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 920a3a8..b19a2b8 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -86,7 +86,7 @@
                         callee_save_regs, mr_conv->EntrySpills());
     jni_asm->IncreaseFrameSize(32);
     jni_asm->DecreaseFrameSize(32);
-    jni_asm->RemoveFrame(frame_size, callee_save_regs, /* may_suspend */ true);
+    jni_asm->RemoveFrame(frame_size, callee_save_regs, /* may_suspend= */ true);
     jni_asm->FinalizeCode();
     std::vector<uint8_t> actual_asm(jni_asm->CodeSize());
     MemoryRegion code(&actual_asm[0], actual_asm.size());
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index bd4304c..3c68389 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -2196,7 +2196,7 @@
 // Methods not annotated with anything are not considered "fast native"
 // -- Check that the annotation lookup does not find it.
 void JniCompilerTest::NormalNativeImpl() {
-  SetUpForTest(/* direct */ true,
+  SetUpForTest(/* direct= */ true,
                "normalNative",
                "()V",
                CURRENT_JNI_WRAPPER(Java_MyClassNatives_normalNative));
@@ -2218,7 +2218,7 @@
 }
 
 void JniCompilerTest::FastNativeImpl() {
-  SetUpForTest(/* direct */ true,
+  SetUpForTest(/* direct= */ true,
                "fastNative",
                "()V",
                CURRENT_JNI_WRAPPER(Java_MyClassNatives_fastNative));
@@ -2241,7 +2241,7 @@
 }
 
 void JniCompilerTest::CriticalNativeImpl() {
-  SetUpForTest(/* direct */ true,
+  SetUpForTest(/* direct= */ true,
                // Important: Don't change the "current jni" yet to avoid a method name suffix.
                "criticalNative",
                "()V",
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 09376dd..bdbf429 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -151,7 +151,7 @@
     // Don't allow both @FastNative and @CriticalNative. They are mutually exclusive.
     if (UNLIKELY(is_fast_native && is_critical_native)) {
       LOG(FATAL) << "JniCompile: Method cannot be both @CriticalNative and @FastNative"
-                 << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+                 << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
     }
 
     // @CriticalNative - extra checks:
@@ -162,15 +162,15 @@
       CHECK(is_static)
           << "@CriticalNative functions cannot be virtual since that would"
           << "require passing a reference parameter (this), which is illegal "
-          << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+          << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
       CHECK(!is_synchronized)
           << "@CriticalNative functions cannot be synchronized since that would"
           << "require passing a (class and/or this) reference parameter, which is illegal "
-          << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+          << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
       for (size_t i = 0; i < strlen(shorty); ++i) {
         CHECK_NE(Primitive::kPrimNot, Primitive::GetType(shorty[i]))
             << "@CriticalNative methods' shorty types must not have illegal references "
-            << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+            << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
       }
     }
   }
@@ -632,7 +632,7 @@
   __ DecreaseFrameSize(current_out_arg_size);
 
   // 15. Process pending exceptions from JNI call or monitor exit.
-  __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0 /* stack_adjust */);
+  __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), /* stack_adjust= */ 0);
 
   // 16. Remove activation - need to restore callee save registers since the GC may have changed
   //     them.
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 44f3296..6acce10 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -282,10 +282,10 @@
                         name,
                         SHT_STRTAB,
                         flags,
-                        /* link */ nullptr,
-                        /* info */ 0,
+                        /* link= */ nullptr,
+                        /* info= */ 0,
                         align,
-                        /* entsize */ 0) { }
+                        /* entsize= */ 0) { }
 
     Elf_Word Add(const std::string& name) {
       if (CachedSection::GetCacheSize() == 0u) {
@@ -306,10 +306,10 @@
                   name,
                   SHT_STRTAB,
                   flags,
-                  /* link */ nullptr,
-                  /* info */ 0,
+                  /* link= */ nullptr,
+                  /* info= */ 0,
                   align,
-                  /* entsize */ 0) {
+                  /* entsize= */ 0) {
       Reset();
     }
 
@@ -351,7 +351,7 @@
                   type,
                   flags,
                   strtab,
-                  /* info */ 1,
+                  /* info= */ 1,
                   sizeof(Elf_Off),
                   sizeof(Elf_Sym)) {
       syms_.push_back(Elf_Sym());  // The symbol table always has to start with NULL symbol.
@@ -768,7 +768,7 @@
       // The runtime does not care about the size of this symbol (it uses the "lastword" symbol).
       // We use size 0 (meaning "unknown size" in ELF) to prevent overlap with the debug symbols.
       Elf_Word oatexec = dynstr_.Add("oatexec");
-      dynsym_.Add(oatexec, &text_, text_.GetAddress(), /* size */ 0, STB_GLOBAL, STT_OBJECT);
+      dynsym_.Add(oatexec, &text_, text_.GetAddress(), /* size= */ 0, STB_GLOBAL, STT_OBJECT);
       Elf_Word oatlastword = dynstr_.Add("oatlastword");
       Elf_Word oatlastword_address = text_.GetAddress() + text_size - 4;
       dynsym_.Add(oatlastword, &text_, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT);
@@ -824,7 +824,7 @@
     }
     if (dex_size != 0u) {
       Elf_Word oatdex = dynstr_.Add("oatdex");
-      dynsym_.Add(oatdex, &dex_, dex_.GetAddress(), /* size */ 0, STB_GLOBAL, STT_OBJECT);
+      dynsym_.Add(oatdex, &dex_, dex_.GetAddress(), /* size= */ 0, STB_GLOBAL, STT_OBJECT);
       Elf_Word oatdexlastword = dynstr_.Add("oatdexlastword");
       Elf_Word oatdexlastword_address = dex_.GetAddress() + dex_size - 4;
       dynsym_.Add(oatdexlastword, &dex_, oatdexlastword_address, 4, STB_GLOBAL, STT_OBJECT);
diff --git a/compiler/linker/linker_patch.h b/compiler/linker/linker_patch.h
index 5e1615f..f9e3930 100644
--- a/compiler/linker/linker_patch.h
+++ b/compiler/linker/linker_patch.h
@@ -58,7 +58,7 @@
   static LinkerPatch IntrinsicReferencePatch(size_t literal_offset,
                                              uint32_t pc_insn_offset,
                                              uint32_t intrinsic_data) {
-    LinkerPatch patch(literal_offset, Type::kIntrinsicReference, /* target_dex_file */ nullptr);
+    LinkerPatch patch(literal_offset, Type::kIntrinsicReference, /* target_dex_file= */ nullptr);
     patch.intrinsic_data_ = intrinsic_data;
     patch.pc_insn_offset_ = pc_insn_offset;
     return patch;
@@ -67,7 +67,7 @@
   static LinkerPatch DataBimgRelRoPatch(size_t literal_offset,
                                         uint32_t pc_insn_offset,
                                         uint32_t boot_image_offset) {
-    LinkerPatch patch(literal_offset, Type::kDataBimgRelRo, /* target_dex_file */ nullptr);
+    LinkerPatch patch(literal_offset, Type::kDataBimgRelRo, /* target_dex_file= */ nullptr);
     patch.boot_image_offset_ = boot_image_offset;
     patch.pc_insn_offset_ = pc_insn_offset;
     return patch;
@@ -144,7 +144,9 @@
   static LinkerPatch BakerReadBarrierBranchPatch(size_t literal_offset,
                                                  uint32_t custom_value1 = 0u,
                                                  uint32_t custom_value2 = 0u) {
-    LinkerPatch patch(literal_offset, Type::kBakerReadBarrierBranch, /* target_dex_file */ nullptr);
+    LinkerPatch patch(literal_offset,
+                      Type::kBakerReadBarrierBranch,
+                      /* target_dex_file= */ nullptr);
     patch.baker_custom_value1_ = custom_value1;
     patch.baker_custom_value2_ = custom_value2;
     return patch;
diff --git a/compiler/optimizing/block_builder.cc b/compiler/optimizing/block_builder.cc
index d9df23f..d1ccbee 100644
--- a/compiler/optimizing/block_builder.cc
+++ b/compiler/optimizing/block_builder.cc
@@ -415,7 +415,7 @@
   // Create blocks.
   HBasicBlock* entry_block = new (allocator_) HBasicBlock(graph_, kNoDexPc);
   HBasicBlock* exit_block = new (allocator_) HBasicBlock(graph_, kNoDexPc);
-  HBasicBlock* body = MaybeCreateBlockAt(/* semantic_dex_pc */ kNoDexPc, /* store_dex_pc */ 0u);
+  HBasicBlock* body = MaybeCreateBlockAt(/* semantic_dex_pc= */ kNoDexPc, /* store_dex_pc= */ 0u);
 
   // Add blocks to the graph.
   graph_->AddBlock(entry_block);
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 1c3660c..54a1ae9 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -1634,7 +1634,7 @@
         HBasicBlock* block = GetPreHeader(loop, check);
         HInstruction* cond =
             new (GetGraph()->GetAllocator()) HEqual(array, GetGraph()->GetNullConstant());
-        InsertDeoptInLoop(loop, block, cond, /* is_null_check */ true);
+        InsertDeoptInLoop(loop, block, cond, /* is_null_check= */ true);
         ReplaceInstruction(check, array);
         return true;
       }
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index e15161e..5927d68 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -43,7 +43,7 @@
   void RunBCE() {
     graph_->BuildDominatorTree();
 
-    InstructionSimplifier(graph_, /* codegen */ nullptr).Run();
+    InstructionSimplifier(graph_, /* codegen= */ nullptr).Run();
 
     SideEffectsAnalysis side_effects(graph_);
     side_effects.Run();
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 2184f99..04e0cc4 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -414,7 +414,7 @@
     // This ensures that we have correct native line mapping for all native instructions.
     // It is necessary to make stepping over a statement work. Otherwise, any initial
     // instructions (e.g. moves) would be assumed to be the start of next statement.
-    MaybeRecordNativeDebugInfo(nullptr /* instruction */, block->GetDexPc());
+    MaybeRecordNativeDebugInfo(/* instruction= */ nullptr, block->GetDexPc());
     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
       HInstruction* current = it.Current();
       if (current->HasEnvironment()) {
@@ -1085,7 +1085,7 @@
     // call). Therefore register_mask contains both callee-save and caller-save
     // registers that hold objects. We must remove the spilled caller-save from the
     // mask, since they will be overwritten by the callee.
-    uint32_t spills = GetSlowPathSpills(locations, /* core_registers */ true);
+    uint32_t spills = GetSlowPathSpills(locations, /* core_registers= */ true);
     register_mask &= ~spills;
   } else {
     // The register mask must be a subset of callee-save registers.
@@ -1164,7 +1164,7 @@
       // Ensure that we do not collide with the stack map of the previous instruction.
       GenerateNop();
     }
-    RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info */ true);
+    RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info= */ true);
   }
 }
 
@@ -1182,8 +1182,8 @@
 
     stack_map_stream->BeginStackMapEntry(dex_pc,
                                          native_pc,
-                                         /* register_mask */ 0,
-                                         /* stack_mask */ nullptr,
+                                         /* register_mask= */ 0,
+                                         /* sp_mask= */ nullptr,
                                          StackMap::Kind::Catch);
 
     HInstruction* current_phi = block->GetFirstPhi();
@@ -1555,7 +1555,7 @@
 void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
   size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
 
-  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
   for (uint32_t i : LowToHighBits(core_spills)) {
     // If the register holds an object, update the stack mask.
     if (locations->RegisterContainsObject(i)) {
@@ -1567,7 +1567,7 @@
     stack_offset += codegen->SaveCoreRegister(stack_offset, i);
   }
 
-  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
   for (uint32_t i : LowToHighBits(fp_spills)) {
     DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
     DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -1579,14 +1579,14 @@
 void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
   size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
 
-  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
   for (uint32_t i : LowToHighBits(core_spills)) {
     DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
     DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
     stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
   }
 
-  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
   for (uint32_t i : LowToHighBits(fp_spills)) {
     DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
     DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 9e2fd9e..ff99a3e 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -168,8 +168,8 @@
                                            LocationSummary* locations,
                                            int64_t spill_offset,
                                            bool is_save) {
-  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
-  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
+  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
   DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spills,
                                          codegen->GetNumberOfCoreRegisters(),
                                          fp_spills,
@@ -212,7 +212,7 @@
 
 void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
   size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
-  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
   for (uint32_t i : LowToHighBits(core_spills)) {
     // If the register holds an object, update the stack mask.
     if (locations->RegisterContainsObject(i)) {
@@ -224,7 +224,7 @@
     stack_offset += kXRegSizeInBytes;
   }
 
-  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
   for (uint32_t i : LowToHighBits(fp_spills)) {
     DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
     DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -234,13 +234,13 @@
 
   SaveRestoreLiveRegistersHelper(codegen,
                                  locations,
-                                 codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */);
+                                 codegen->GetFirstRegisterSlotInSlowPath(), /* is_save= */ true);
 }
 
 void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
   SaveRestoreLiveRegistersHelper(codegen,
                                  locations,
-                                 codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */);
+                                 codegen->GetFirstRegisterSlotInSlowPath(), /* is_save= */ false);
 }
 
 class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
@@ -926,7 +926,7 @@
     uint32_t encoded_data = entry.first;
     vixl::aarch64::Label* slow_path_entry = &entry.second.label;
     __ Bind(slow_path_entry);
-    CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
+    CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name= */ nullptr);
   }
 
   // Ensure we emit the literal pool.
@@ -1118,7 +1118,7 @@
     }
   }
 
-  MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void CodeGeneratorARM64::GenerateFrameExit() {
@@ -1888,7 +1888,7 @@
         base,
         offset,
         maybe_temp,
-        /* needs_null_check */ true,
+        /* needs_null_check= */ true,
         field_info.IsVolatile());
   } else {
     // General case.
@@ -1897,7 +1897,7 @@
       // CodeGeneratorARM64::LoadAcquire call.
       // NB: LoadAcquire will record the pc info if needed.
       codegen_->LoadAcquire(
-          instruction, OutputCPURegister(instruction), field, /* needs_null_check */ true);
+          instruction, OutputCPURegister(instruction), field, /* needs_null_check= */ true);
     } else {
       // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
       EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
@@ -1952,7 +1952,7 @@
 
     if (field_info.IsVolatile()) {
       codegen_->StoreRelease(
-          instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check */ true);
+          instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check= */ true);
     } else {
       // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
       EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
@@ -2376,11 +2376,11 @@
                                                       obj.W(),
                                                       offset,
                                                       maybe_temp,
-                                                      /* needs_null_check */ false,
-                                                      /* use_load_acquire */ false);
+                                                      /* needs_null_check= */ false,
+                                                      /* use_load_acquire= */ false);
     } else {
       codegen_->GenerateArrayLoadWithBakerReadBarrier(
-          instruction, out, obj.W(), offset, index, /* needs_null_check */ false);
+          instruction, out, obj.W(), offset, index, /* needs_null_check= */ false);
     }
   } else {
     // General case.
@@ -2925,7 +2925,7 @@
   int64_t magic;
   int shift;
   CalculateMagicAndShiftForDivRem(
-      imm, type == DataType::Type::kInt64 /* is_long */, &magic, &shift);
+      imm, /* is_long= */ type == DataType::Type::kInt64, &magic, &shift);
 
   UseScratchRegisterScope temps(GetVIXLAssembler());
   Register temp = temps.AcquireSameSizeAs(out);
@@ -3116,7 +3116,7 @@
   }
   if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
     GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
   }
   if (!codegen_->GoesToNextBlock(block, successor)) {
     __ B(codegen_->GetLabelOf(successor));
@@ -3266,7 +3266,7 @@
   if (codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor)) {
     false_target = nullptr;
   }
-  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
 }
 
 void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -3285,9 +3285,9 @@
   SlowPathCodeARM64* slow_path =
       deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM64>(deoptimize);
   GenerateTestAndBranch(deoptimize,
-                        /* condition_input_index */ 0,
+                        /* condition_input_index= */ 0,
                         slow_path->GetEntryLabel(),
-                        /* false_target */ nullptr);
+                        /* false_target= */ nullptr);
 }
 
 void LocationsBuilderARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -3627,7 +3627,7 @@
       __ Cmp(out, cls);
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(ne, slow_path->GetEntryLabel());
       __ Mov(out, 1);
@@ -3659,7 +3659,7 @@
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(slow_path->GetEntryLabel());
       if (zero.IsLinked()) {
@@ -3952,7 +3952,7 @@
 
 void InstructionCodeGeneratorARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
   codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
@@ -4022,7 +4022,7 @@
     codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
   }
 
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -4201,7 +4201,7 @@
 
 void InstructionCodeGeneratorARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
   codegen_->GenerateInvokePolymorphicCall(invoke);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::VisitInvokeCustom(HInvokeCustom* invoke) {
@@ -4210,21 +4210,21 @@
 
 void InstructionCodeGeneratorARM64::VisitInvokeCustom(HInvokeCustom* invoke) {
   codegen_->GenerateInvokeCustomCall(invoke);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageIntrinsicPatch(
     uint32_t intrinsic_data,
     vixl::aarch64::Label* adrp_label) {
   return NewPcRelativePatch(
-      /* dex_file */ nullptr, intrinsic_data, adrp_label, &boot_image_intrinsic_patches_);
+      /* dex_file= */ nullptr, intrinsic_data, adrp_label, &boot_image_intrinsic_patches_);
 }
 
 vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageRelRoPatch(
     uint32_t boot_image_offset,
     vixl::aarch64::Label* adrp_label) {
   return NewPcRelativePatch(
-      /* dex_file */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_);
+      /* dex_file= */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_);
 }
 
 vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageMethodPatch(
@@ -4308,7 +4308,7 @@
   ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
   return jit_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
-      [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
+      [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u); });
 }
 
 vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral(
@@ -4316,7 +4316,7 @@
   ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
   return jit_class_patches_.GetOrCreate(
       TypeReference(&dex_file, type_index),
-      [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
+      [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u); });
 }
 
 void CodeGeneratorARM64::EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label,
@@ -4513,7 +4513,7 @@
   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
 
   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
     return;
   }
 
@@ -4526,12 +4526,12 @@
         invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
   }
 
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
     return;
   }
 
@@ -4543,7 +4543,7 @@
     DCHECK(!codegen_->IsLeafMethod());
   }
 
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
@@ -4611,7 +4611,7 @@
   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
     codegen_->GenerateLoadClassRuntimeCall(cls);
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
     return;
   }
   DCHECK(!cls->NeedsAccessCheck());
@@ -4633,7 +4633,7 @@
                                         out_loc,
                                         current_method,
                                         ArtMethod::DeclaringClassOffset().Int32Value(),
-                                        /* fixup_label */ nullptr,
+                                        /* fixup_label= */ nullptr,
                                         read_barrier_option);
       break;
     }
@@ -4696,8 +4696,8 @@
       codegen_->GenerateGcRootFieldLoad(cls,
                                         out_loc,
                                         out.X(),
-                                        /* offset */ 0,
-                                        /* fixup_label */ nullptr,
+                                        /* offset= */ 0,
+                                        /* fixup_label= */ nullptr,
                                         read_barrier_option);
       break;
     }
@@ -4721,7 +4721,7 @@
     } else {
       __ Bind(slow_path->GetExitLabel());
     }
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
   }
 }
 
@@ -4859,7 +4859,7 @@
       codegen_->AddSlowPath(slow_path);
       __ Cbz(out.X(), slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
-      codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+      codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
       return;
     }
     case HLoadString::LoadKind::kJitBootImageAddress: {
@@ -4875,8 +4875,8 @@
       codegen_->GenerateGcRootFieldLoad(load,
                                         out_loc,
                                         out.X(),
-                                        /* offset */ 0,
-                                        /* fixup_label */ nullptr,
+                                        /* offset= */ 0,
+                                        /* fixup_label= */ nullptr,
                                         kCompilerReadBarrierOption);
       return;
     }
@@ -4890,7 +4890,7 @@
   __ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex().index_);
   codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
   CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
@@ -4918,7 +4918,7 @@
   } else {
     CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
   }
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::VisitMul(HMul* mul) {
@@ -5013,7 +5013,7 @@
   QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
   codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -5027,7 +5027,7 @@
 void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
   codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::VisitNot(HNot* instruction) {
@@ -5502,7 +5502,7 @@
     return;
   }
   GenerateSuspendCheck(instruction, nullptr);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
@@ -5715,8 +5715,8 @@
                                                       out_reg,
                                                       offset,
                                                       maybe_temp,
-                                                      /* needs_null_check */ false,
-                                                      /* use_load_acquire */ false);
+                                                      /* needs_null_check= */ false,
+                                                      /* use_load_acquire= */ false);
     } else {
       // Load with slow path based read barrier.
       // Save the value of `out` into `maybe_temp` before overwriting it
@@ -5756,8 +5756,8 @@
                                                       obj_reg,
                                                       offset,
                                                       maybe_temp,
-                                                      /* needs_null_check */ false,
-                                                      /* use_load_acquire */ false);
+                                                      /* needs_null_check= */ false,
+                                                      /* use_load_acquire= */ false);
     } else {
       // Load with slow path based read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
@@ -5842,7 +5842,7 @@
     // Note that GC roots are not affected by heap poisoning, thus we
     // do not have to unpoison `root_reg` here.
   }
-  MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void CodeGeneratorARM64::GenerateUnsafeCasOldValueMovWithBakerReadBarrier(
@@ -5931,7 +5931,7 @@
     }
     __ bind(&return_address);
   }
-  MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
+  MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__, /* temp_loc= */ LocationFrom(ip1));
 }
 
 void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -6039,7 +6039,7 @@
     }
     __ bind(&return_address);
   }
-  MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
+  MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__, /* temp_loc= */ LocationFrom(ip1));
 }
 
 void CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
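
Note on the pattern applied throughout these hunks: argument comments move from the free-form /* name */ value to /* name= */ value, the form that clang-tidy's bugprone-argument-comment check can verify against the parameter name in the callee's declaration. The check only matches a comment placed directly before the argument, which is also why trailing forms such as true /* is_save */ are moved in front of the value. A minimal sketch of the convention, with hypothetical function and parameter names (Emit is not from this patch):

    // Callee definition; the argument comments below are checked against
    // these parameter names.
    void Emit(bool needs_null_check, bool is_volatile) {}

    void Caller() {
      // Comments match the declared parameter names: accepted by clang-tidy.
      Emit(/* needs_null_check= */ true, /* is_volatile= */ false);
      // Comment names a parameter that does not exist: flagged by
      // bugprone-argument-comment.
      Emit(/* null_check= */ true, /* is_volatile= */ false);
    }
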
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index dad1813..8204f1e 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -319,7 +319,7 @@
   size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
   size_t orig_offset = stack_offset;
 
-  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
   for (uint32_t i : LowToHighBits(core_spills)) {
     // If the register holds an object, update the stack mask.
     if (locations->RegisterContainsObject(i)) {
@@ -334,7 +334,7 @@
   CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
   arm_codegen->GetAssembler()->StoreRegisterList(core_spills, orig_offset);
 
-  uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+  uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
   orig_offset = stack_offset;
   for (uint32_t i : LowToHighBits(fp_spills)) {
     DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -357,7 +357,7 @@
   size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
   size_t orig_offset = stack_offset;
 
-  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
   for (uint32_t i : LowToHighBits(core_spills)) {
     DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
     DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -368,7 +368,7 @@
   CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
   arm_codegen->GetAssembler()->LoadRegisterList(core_spills, orig_offset);
 
-  uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+  uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
   while (fp_spills != 0u) {
     uint32_t begin = CTZ(fp_spills);
     uint32_t tmp = fp_spills + (1u << begin);
@@ -1539,7 +1539,7 @@
     vixl32::Label done_label;
     vixl32::Label* const final_label = codegen->GetFinalLabel(cond, &done_label);
 
-    __ B(condition.second, final_label, /* far_target */ false);
+    __ B(condition.second, final_label, /* is_far_target= */ false);
     __ Mov(out, 1);
 
     if (done_label.IsReferenced()) {
@@ -1934,7 +1934,7 @@
     uint32_t encoded_data = entry.first;
     vixl::aarch32::Label* slow_path_entry = &entry.second.label;
     __ Bind(slow_path_entry);
-    CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
+    CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name= */ nullptr);
   }
 
   GetAssembler()->FinalizeCode();
@@ -2159,7 +2159,7 @@
     GetAssembler()->StoreToOffset(kStoreWord, temp, sp, GetStackOffsetOfShouldDeoptimizeFlag());
   }
 
-  MaybeGenerateMarkingRegisterCheck(/* code */ 1);
+  MaybeGenerateMarkingRegisterCheck(/* code= */ 1);
 }
 
 void CodeGeneratorARMVIXL::GenerateFrameExit() {
@@ -2427,7 +2427,7 @@
   }
   if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
     GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 2);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 2);
   }
   if (!codegen_->GoesToNextBlock(block, successor)) {
     __ B(codegen_->GetLabelOf(successor));
@@ -2606,7 +2606,7 @@
       nullptr : codegen_->GetLabelOf(true_successor);
   vixl32::Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
       nullptr : codegen_->GetLabelOf(false_successor);
-  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
 }
 
 void LocationsBuilderARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -2625,9 +2625,9 @@
   SlowPathCodeARMVIXL* slow_path =
       deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARMVIXL>(deoptimize);
   GenerateTestAndBranch(deoptimize,
-                        /* condition_input_index */ 0,
+                        /* condition_input_index= */ 0,
                         slow_path->GetEntryLabel(),
-                        /* false_target */ nullptr);
+                        /* false_target= */ nullptr);
 }
 
 void LocationsBuilderARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -2793,7 +2793,7 @@
     }
   }
 
-  GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target */ false);
+  GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target= */ false);
   codegen_->MoveLocation(out, src, type);
   if (output_overlaps_with_condition_inputs) {
     __ B(target);
@@ -3135,7 +3135,7 @@
 
 void InstructionCodeGeneratorARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
   codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 3);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 3);
 }
 
 void LocationsBuilderARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
@@ -3166,7 +3166,7 @@
   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
 
   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 4);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 4);
     return;
   }
 
@@ -3174,7 +3174,7 @@
   codegen_->GenerateStaticOrDirectCall(
       invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
 
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 5);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 5);
 }
 
 void LocationsBuilderARMVIXL::HandleInvoke(HInvoke* invoke) {
@@ -3193,14 +3193,14 @@
 
 void InstructionCodeGeneratorARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 6);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 6);
     return;
   }
 
   codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
   DCHECK(!codegen_->IsLeafMethod());
 
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 7);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 7);
 }
 
 void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
@@ -3278,7 +3278,7 @@
     DCHECK(!codegen_->IsLeafMethod());
   }
 
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 8);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 8);
 }
 
 void LocationsBuilderARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
@@ -3287,7 +3287,7 @@
 
 void InstructionCodeGeneratorARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
   codegen_->GenerateInvokePolymorphicCall(invoke);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 9);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 9);
 }
 
 void LocationsBuilderARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) {
@@ -3296,7 +3296,7 @@
 
 void InstructionCodeGeneratorARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) {
   codegen_->GenerateInvokeCustomCall(invoke);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 10);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 10);
 }
 
 void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) {
@@ -4013,7 +4013,7 @@
 
   int64_t magic;
   int shift;
-  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+  CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
 
   // TODO(VIXL): Change the static cast to Operand::From() after VIXL is fixed.
   __ Mov(temp1, static_cast<int32_t>(magic));
@@ -4421,7 +4421,7 @@
 
   __ Vcmp(op1, op2);
   __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
-  __ B(vs, &nan, /* far_target */ false);  // if un-ordered, go to NaN handling.
+  __ B(vs, &nan, /* is_far_target= */ false);  // if un-ordered, go to NaN handling.
 
   // op1 <> op2
   vixl32::ConditionType cond = is_min ? gt : lt;
@@ -4433,7 +4433,7 @@
     __ vmov(cond, F32, out, op2);
   }
   // for <>(not equal), we've done min/max calculation.
-  __ B(ne, final_label, /* far_target */ false);
+  __ B(ne, final_label, /* is_far_target= */ false);
 
   // handle op1 == op2, max(+0.0,-0.0), min(+0.0,-0.0).
   __ Vmov(temp1, op1);
@@ -4478,7 +4478,7 @@
 
   __ Vcmp(op1, op2);
   __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
-  __ B(vs, &handle_nan_eq, /* far_target */ false);  // if un-ordered, go to NaN handling.
+  __ B(vs, &handle_nan_eq, /* is_far_target= */ false);  // if un-ordered, go to NaN handling.
 
   // op1 <> op2
   vixl32::ConditionType cond = is_min ? gt : lt;
@@ -4490,7 +4490,7 @@
     __ vmov(cond, F64, out, op2);
   }
   // for <>(not equal), we've done min/max calculation.
-  __ B(ne, final_label, /* far_target */ false);
+  __ B(ne, final_label, /* is_far_target= */ false);
 
   // handle op1 == op2, max(+0.0,-0.0).
   if (!is_min) {
@@ -4714,7 +4714,7 @@
     __ And(shift_right, RegisterFrom(rhs), 0x1F);
     __ Lsrs(shift_left, RegisterFrom(rhs), 6);
     __ Rsb(LeaveFlags, shift_left, shift_right, Operand::From(kArmBitsPerWord));
-    __ B(cc, &shift_by_32_plus_shift_right, /* far_target */ false);
+    __ B(cc, &shift_by_32_plus_shift_right, /* is_far_target= */ false);
 
     // out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right).
     // out_reg_lo = (reg_lo << shift_left) | (reg_hi >> shift_right).
@@ -5030,7 +5030,7 @@
 void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction) {
   codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 11);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 11);
 }
 
 void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
@@ -5048,7 +5048,7 @@
   codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
   DCHECK(!codegen_->IsLeafMethod());
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 12);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 12);
 }
 
 void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
@@ -5170,8 +5170,8 @@
     }
     case DataType::Type::kInt64: {
       __ Cmp(HighRegisterFrom(left), HighRegisterFrom(right));  // Signed compare.
-      __ B(lt, &less, /* far_target */ false);
-      __ B(gt, &greater, /* far_target */ false);
+      __ B(lt, &less, /* is_far_target= */ false);
+      __ B(gt, &greater, /* is_far_target= */ false);
       // Emit move to `out` before the last `Cmp`, as `Mov` might affect the status flags.
       __ Mov(out, 0);
       __ Cmp(LowRegisterFrom(left), LowRegisterFrom(right));  // Unsigned compare.
@@ -5192,8 +5192,8 @@
       UNREACHABLE();
   }
 
-  __ B(eq, final_label, /* far_target */ false);
-  __ B(less_cond, &less, /* far_target */ false);
+  __ B(eq, final_label, /* is_far_target= */ false);
+  __ B(less_cond, &less, /* is_far_target= */ false);
 
   __ Bind(&greater);
   __ Mov(out, 1);
@@ -5608,7 +5608,7 @@
         // Note that a potential implicit null check is handled in this
         // CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier call.
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            instruction, out, base, offset, maybe_temp, /* needs_null_check */ true);
+            instruction, out, base, offset, maybe_temp, /* needs_null_check= */ true);
         if (is_volatile) {
           codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
         }
@@ -5964,7 +5964,7 @@
           __ Lsrs(length, length, 1u);  // LSRS has a 16-bit encoding, TST (immediate) does not.
           static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
                         "Expecting 0=compressed, 1=uncompressed");
-          __ B(cs, &uncompressed_load, /* far_target */ false);
+          __ B(cs, &uncompressed_load, /* is_far_target= */ false);
           GetAssembler()->LoadFromOffset(kLoadUnsignedByte,
                                          RegisterFrom(out_loc),
                                          obj,
@@ -6006,7 +6006,7 @@
           __ Lsrs(length, length, 1u);  // LSRS has a 16-bit encoding, TST (immediate) does not.
           static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
                         "Expecting 0=compressed, 1=uncompressed");
-          __ B(cs, &uncompressed_load, /* far_target */ false);
+          __ B(cs, &uncompressed_load, /* is_far_target= */ false);
           __ Ldrb(RegisterFrom(out_loc), MemOperand(temp, RegisterFrom(index), vixl32::LSL, 0));
           __ B(final_label);
           __ Bind(&uncompressed_load);
@@ -6046,11 +6046,11 @@
                                                           obj,
                                                           data_offset,
                                                           maybe_temp,
-                                                          /* needs_null_check */ false);
+                                                          /* needs_null_check= */ false);
         } else {
           Location temp = locations->GetTemp(0);
           codegen_->GenerateArrayLoadWithBakerReadBarrier(
-              out_loc, obj, data_offset, index, temp, /* needs_null_check */ false);
+              out_loc, obj, data_offset, index, temp, /* needs_null_check= */ false);
         }
       } else {
         vixl32::Register out = OutputRegister(instruction);
@@ -6325,7 +6325,7 @@
 
         if (instruction->StaticTypeOfArrayIsObjectArray()) {
           vixl32::Label do_put;
-          __ B(eq, &do_put, /* far_target */ false);
+          __ B(eq, &do_put, /* is_far_target= */ false);
           // If heap poisoning is enabled, the `temp1` reference has
           // not been unpoisoned yet; unpoison it now.
           GetAssembler()->MaybeUnpoisonHeapReference(temp1);
@@ -6627,7 +6627,7 @@
     return;
   }
   GenerateSuspendCheck(instruction, nullptr);
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 13);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 13);
 }
 
 void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instruction,
@@ -6975,7 +6975,7 @@
   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
     codegen_->GenerateLoadClassRuntimeCall(cls);
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 14);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 14);
     return;
   }
   DCHECK(!cls->NeedsAccessCheck());
@@ -7014,14 +7014,14 @@
       CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
           codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(cls));
       codegen_->EmitMovwMovtPlaceholder(labels, out);
-      __ Ldr(out, MemOperand(out, /* offset */ 0));
+      __ Ldr(out, MemOperand(out, /* offset= */ 0));
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
       CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
           codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
       codegen_->EmitMovwMovtPlaceholder(labels, out);
-      codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
+      codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option);
       generate_null_check = true;
       break;
     }
@@ -7037,7 +7037,7 @@
                                                        cls->GetTypeIndex(),
                                                        cls->GetClass()));
       // /* GcRoot<mirror::Class> */ out = *out
-      codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
+      codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option);
       break;
     }
     case HLoadClass::LoadKind::kRuntimeCall:
@@ -7059,7 +7059,7 @@
     } else {
       __ Bind(slow_path->GetExitLabel());
     }
-    codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 15);
+    codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 15);
   }
 }
 
@@ -7240,7 +7240,7 @@
       CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
           codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(load));
       codegen_->EmitMovwMovtPlaceholder(labels, out);
-      __ Ldr(out, MemOperand(out, /* offset */ 0));
+      __ Ldr(out, MemOperand(out, /* offset= */ 0));
       return;
     }
     case HLoadString::LoadKind::kBssEntry: {
@@ -7249,13 +7249,13 @@
           codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
       codegen_->EmitMovwMovtPlaceholder(labels, out);
       codegen_->GenerateGcRootFieldLoad(
-          load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+          load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption);
       LoadStringSlowPathARMVIXL* slow_path =
           new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
       codegen_->AddSlowPath(slow_path);
       __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
-      codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 16);
+      codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 16);
       return;
     }
     case HLoadString::LoadKind::kJitBootImageAddress: {
@@ -7270,7 +7270,7 @@
                                                         load->GetString()));
       // /* GcRoot<mirror::String> */ out = *out
       codegen_->GenerateGcRootFieldLoad(
-          load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+          load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption);
       return;
     }
     default:
@@ -7283,7 +7283,7 @@
   __ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
   codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
   CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 17);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 17);
 }
 
 static int32_t GetExceptionTlsOffset() {
@@ -7415,7 +7415,7 @@
   if (instruction->MustDoNullCheck()) {
     DCHECK(!out.Is(obj));
     __ Mov(out, 0);
-    __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false);
+    __ CompareAndBranchIfZero(obj, final_label, /* is_far_target= */ false);
   }
 
   switch (type_check_kind) {
@@ -7447,7 +7447,7 @@
         __ it(eq);
         __ mov(eq, out, 1);
       } else {
-        __ B(ne, final_label, /* far_target */ false);
+        __ B(ne, final_label, /* is_far_target= */ false);
         __ Mov(out, 1);
       }
 
@@ -7475,9 +7475,9 @@
                                        maybe_temp_loc,
                                        read_barrier_option);
       // If `out` is null, we use it for the result, and jump to the final label.
-      __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+      __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
       __ Cmp(out, cls);
-      __ B(ne, &loop, /* far_target */ false);
+      __ B(ne, &loop, /* is_far_target= */ false);
       __ Mov(out, 1);
       break;
     }
@@ -7496,7 +7496,7 @@
       vixl32::Label loop, success;
       __ Bind(&loop);
       __ Cmp(out, cls);
-      __ B(eq, &success, /* far_target */ false);
+      __ B(eq, &success, /* is_far_target= */ false);
       // /* HeapReference<Class> */ out = out->super_class_
       GenerateReferenceLoadOneRegister(instruction,
                                        out_loc,
@@ -7506,7 +7506,7 @@
       // This is essentially a null check, but it sets the condition flags to the
       // proper value for the code that follows the loop, i.e. not `eq`.
       __ Cmp(out, 1);
-      __ B(hs, &loop, /* far_target */ false);
+      __ B(hs, &loop, /* is_far_target= */ false);
 
       // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
       // we check that the output is in a low register, so that a 16-bit MOV
@@ -7551,7 +7551,7 @@
       // Do an exact check.
       vixl32::Label exact_check;
       __ Cmp(out, cls);
-      __ B(eq, &exact_check, /* far_target */ false);
+      __ B(eq, &exact_check, /* is_far_target= */ false);
       // Otherwise, we need to check that the object's class is a non-primitive array.
       // /* HeapReference<Class> */ out = out->component_type_
       GenerateReferenceLoadOneRegister(instruction,
@@ -7560,7 +7560,7 @@
                                        maybe_temp_loc,
                                        read_barrier_option);
       // If `out` is null, we use it for the result, and jump to the final label.
-      __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+      __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
       GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
       static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
       __ Cmp(out, 0);
@@ -7582,7 +7582,7 @@
         __ it(eq);
         __ mov(eq, out, 1);
       } else {
-        __ B(ne, final_label, /* far_target */ false);
+        __ B(ne, final_label, /* is_far_target= */ false);
         __ Bind(&exact_check);
         __ Mov(out, 1);
       }
@@ -7602,7 +7602,7 @@
       __ Cmp(out, cls);
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(ne, slow_path->GetEntryLabel());
       __ Mov(out, 1);
@@ -7631,7 +7631,7 @@
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(slow_path->GetEntryLabel());
       break;
@@ -7716,7 +7716,7 @@
   vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done);
   // Avoid null check if we know obj is not null.
   if (instruction->MustDoNullCheck()) {
-    __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false);
+    __ CompareAndBranchIfZero(obj, final_label, /* is_far_target= */ false);
   }
 
   switch (type_check_kind) {
@@ -7763,7 +7763,7 @@
 
       // Otherwise, compare the classes.
       __ Cmp(temp, cls);
-      __ B(ne, &loop, /* far_target */ false);
+      __ B(ne, &loop, /* is_far_target= */ false);
       break;
     }
 
@@ -7780,7 +7780,7 @@
       vixl32::Label loop;
       __ Bind(&loop);
       __ Cmp(temp, cls);
-      __ B(eq, final_label, /* far_target */ false);
+      __ B(eq, final_label, /* is_far_target= */ false);
 
       // /* HeapReference<Class> */ temp = temp->super_class_
       GenerateReferenceLoadOneRegister(instruction,
@@ -7808,7 +7808,7 @@
 
       // Do an exact check.
       __ Cmp(temp, cls);
-      __ B(eq, final_label, /* far_target */ false);
+      __ B(eq, final_label, /* is_far_target= */ false);
 
       // Otherwise, we need to check that the object's class is a non-primitive array.
       // /* HeapReference<Class> */ temp = temp->component_type_
@@ -7872,7 +7872,7 @@
       __ Sub(RegisterFrom(maybe_temp2_loc), RegisterFrom(maybe_temp2_loc), 2);
       // Compare the classes and continue the loop if they do not match.
       __ Cmp(cls, RegisterFrom(maybe_temp3_loc));
-      __ B(ne, &start_loop, /* far_target */ false);
+      __ B(ne, &start_loop, /* is_far_target= */ false);
       break;
     }
 
@@ -7913,7 +7913,7 @@
   } else {
     CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
   }
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 18);
+  codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 18);
 }
 
 void LocationsBuilderARMVIXL::VisitAnd(HAnd* instruction) {
@@ -8268,7 +8268,7 @@
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(out + offset)
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          instruction, out, out_reg, offset, maybe_temp, /* needs_null_check */ false);
+          instruction, out, out_reg, offset, maybe_temp, /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // Save the value of `out` into `maybe_temp` before overwriting it
@@ -8303,7 +8303,7 @@
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          instruction, out, obj_reg, offset, maybe_temp, /* needs_null_check */ false);
+          instruction, out, obj_reg, offset, maybe_temp, /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
@@ -8384,7 +8384,7 @@
     // Note that GC roots are not affected by heap poisoning, thus we
     // do not have to unpoison `root_reg` here.
   }
-  MaybeGenerateMarkingRegisterCheck(/* code */ 19);
+  MaybeGenerateMarkingRegisterCheck(/* code= */ 19);
 }
 
 void CodeGeneratorARMVIXL::GenerateUnsafeCasOldValueAddWithBakerReadBarrier(
@@ -8484,7 +8484,7 @@
               narrow ? BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET
                      : BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET);
   }
-  MaybeGenerateMarkingRegisterCheck(/* code */ 20, /* temp_loc */ LocationFrom(ip));
+  MaybeGenerateMarkingRegisterCheck(/* code= */ 20, /* temp_loc= */ LocationFrom(ip));
 }
 
 void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -8572,7 +8572,7 @@
     DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
               BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
   }
-  MaybeGenerateMarkingRegisterCheck(/* code */ 21, /* temp_loc */ LocationFrom(ip));
+  MaybeGenerateMarkingRegisterCheck(/* code= */ 21, /* temp_loc= */ LocationFrom(ip));
 }
 
 void CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
@@ -8815,12 +8815,12 @@
 
 CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageIntrinsicPatch(
     uint32_t intrinsic_data) {
-  return NewPcRelativePatch(/* dex_file */ nullptr, intrinsic_data, &boot_image_intrinsic_patches_);
+  return NewPcRelativePatch(/* dex_file= */ nullptr, intrinsic_data, &boot_image_intrinsic_patches_);
 }
 
 CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageRelRoPatch(
     uint32_t boot_image_offset) {
-  return NewPcRelativePatch(/* dex_file */ nullptr,
+  return NewPcRelativePatch(/* dex_file= */ nullptr,
                             boot_image_offset,
                             &boot_image_method_patches_);
 }
@@ -8891,7 +8891,7 @@
   return jit_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
       [this]() {
-        return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u);
+        return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u);
       });
 }
 
@@ -8902,7 +8902,7 @@
   return jit_class_patches_.GetOrCreate(
       TypeReference(&dex_file, type_index),
       [this]() {
-        return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u);
+        return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u);
       });
 }
 
@@ -8916,7 +8916,7 @@
     CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
         NewBootImageRelRoPatch(boot_image_reference);
     EmitMovwMovtPlaceholder(labels, reg);
-    __ Ldr(reg, MemOperand(reg, /* offset */ 0));
+    __ Ldr(reg, MemOperand(reg, /* offset= */ 0));
   } else {
     DCHECK(Runtime::Current()->UseJitCompilation());
     gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -9061,7 +9061,7 @@
   return map->GetOrCreate(
       value,
       [this, value]() {
-        return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ value);
+        return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ value);
       });
 }
 
@@ -9288,9 +9288,9 @@
                          CodeBufferCheckScope::kMaximumSize);
   // TODO(VIXL): Think about using mov instead of movw.
   __ bind(&labels->movw_label);
-  __ movw(out, /* placeholder */ 0u);
+  __ movw(out, /* operand= */ 0u);
   __ bind(&labels->movt_label);
-  __ movt(out, /* placeholder */ 0u);
+  __ movt(out, /* operand= */ 0u);
   __ bind(&labels->add_pc_label);
   __ add(out, out, pc);
 }
@@ -9313,7 +9313,7 @@
   static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
   static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
   __ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted));
-  __ B(ne, slow_path, /* is_far_target */ false);
+  __ B(ne, slow_path, /* is_far_target= */ false);
   // To throw NPE, we return to the fast path; the artificial dependence below does not matter.
   if (throw_npe != nullptr) {
     __ Bind(throw_npe);
@@ -9360,7 +9360,7 @@
       vixl32::Label* throw_npe = nullptr;
       if (GetCompilerOptions().GetImplicitNullChecks() && holder_reg.Is(base_reg)) {
         throw_npe = &throw_npe_label;
-        __ CompareAndBranchIfZero(holder_reg, throw_npe, /* is_far_target */ false);
+        __ CompareAndBranchIfZero(holder_reg, throw_npe, /* is_far_target= */ false);
       }
       // Check if the holder is gray and, if not, add fake dependency to the base register
       // and return to the LDR instruction to load the reference. Otherwise, use introspection
@@ -9437,7 +9437,7 @@
       UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
       temps.Exclude(ip);
       vixl32::Label return_label, not_marked, forwarding_address;
-      __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target */ false);
+      __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target= */ false);
       MemOperand lock_word(root_reg, mirror::Object::MonitorOffset().Int32Value());
       __ Ldr(ip, lock_word);
       __ Tst(ip, LockWord::kMarkBitStateMaskShifted);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index c536dd3..f7f37db 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -587,7 +587,7 @@
       mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
                                                         instruction_,
                                                         this,
-                                                        /* direct */ false);
+                                                        /* direct= */ false);
     }
     __ B(GetExitLabel());
   }
@@ -681,7 +681,7 @@
     mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
                                                       instruction_,
                                                       this,
-                                                      /* direct */ false);
+                                                      /* direct= */ false);
 
     // If the new reference is different from the old reference,
     // update the field in the holder (`*(obj_ + field_offset_)`).
@@ -1167,9 +1167,9 @@
     __ Move(r2_l, TMP);
     __ Move(r2_h, AT);
   } else if (loc1.IsStackSlot() && loc2.IsStackSlot()) {
-    Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ false);
+    Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot= */ false);
   } else if (loc1.IsDoubleStackSlot() && loc2.IsDoubleStackSlot()) {
-    Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ true);
+    Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot= */ true);
   } else if (loc1.IsSIMDStackSlot() && loc2.IsSIMDStackSlot()) {
     ExchangeQuadSlots(loc1.GetStackIndex(), loc2.GetStackIndex());
   } else if ((loc1.IsRegister() && loc2.IsStackSlot()) ||
@@ -1654,14 +1654,14 @@
     uint32_t intrinsic_data,
     const PcRelativePatchInfo* info_high) {
   return NewPcRelativePatch(
-      /* dex_file */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
+      /* dex_file= */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
 }
 
 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageRelRoPatch(
     uint32_t boot_image_offset,
     const PcRelativePatchInfo* info_high) {
   return NewPcRelativePatch(
-      /* dex_file */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
+      /* dex_file= */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
 }
 
 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageMethodPatch(
@@ -1737,7 +1737,7 @@
     __ Bind(&info_high->label);
     __ Bind(&info_high->pc_rel_label);
     // Add the high half of a 32-bit offset to PC.
-    __ Auipc(out, /* placeholder */ 0x1234);
+    __ Auipc(out, /* imm16= */ 0x1234);
     __ SetReorder(reordering);
   } else {
     // If base is ZERO, emit NAL to obtain the actual base.
@@ -1746,7 +1746,7 @@
       __ Nal();
     }
     __ Bind(&info_high->label);
-    __ Lui(out, /* placeholder */ 0x1234);
+    __ Lui(out, /* imm16= */ 0x1234);
     // If we emitted the NAL, bind the pc_rel_label, otherwise base is a register holding
     // the HMipsComputeBaseMethodAddress which has its own label stored in MipsAssembler.
     if (base == ZERO) {
@@ -1764,13 +1764,13 @@
   if (GetCompilerOptions().IsBootImage()) {
     PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference);
     PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
-    EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base */ ZERO);
-    __ Addiu(reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+    EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base= */ ZERO);
+    __ Addiu(reg, TMP, /* imm16= */ 0x5678, &info_low->label);
   } else if (GetCompilerOptions().GetCompilePic()) {
     PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
     PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
-    EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base */ ZERO);
-    __ Lw(reg, reg, /* placeholder */ 0x5678, &info_low->label);
+    EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base= */ ZERO);
+    __ Lw(reg, reg, /* imm16= */ 0x5678, &info_low->label);
   } else {
     DCHECK(Runtime::Current()->UseJitCompilation());
     gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1793,8 +1793,8 @@
     PcRelativePatchInfo* info_high = NewBootImageTypePatch(*target_method.dex_file, type_idx);
     PcRelativePatchInfo* info_low =
         NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high);
-    EmitPcRelativeAddressPlaceholderHigh(info_high, argument, /* base */ ZERO);
-    __ Addiu(argument, argument, /* placeholder */ 0x5678, &info_low->label);
+    EmitPcRelativeAddressPlaceholderHigh(info_high, argument, /* base= */ ZERO);
+    __ Addiu(argument, argument, /* imm16= */ 0x5678, &info_low->label);
   } else {
     LoadBootImageAddress(argument, boot_image_offset);
   }
@@ -2579,7 +2579,7 @@
           __ Or(dst_high, dst_high, TMP);
           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
           if (isR6) {
-            __ Beqzc(TMP, &done, /* is_bare */ true);
+            __ Beqzc(TMP, &done, /* is_bare= */ true);
             __ Move(dst_high, dst_low);
             __ Move(dst_low, ZERO);
           } else {
@@ -2595,7 +2595,7 @@
           __ Or(dst_low, dst_low, TMP);
           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
           if (isR6) {
-            __ Beqzc(TMP, &done, /* is_bare */ true);
+            __ Beqzc(TMP, &done, /* is_bare= */ true);
             __ Move(dst_low, dst_high);
             __ Sra(dst_high, dst_high, 31);
           } else {
@@ -2612,7 +2612,7 @@
           __ Or(dst_low, dst_low, TMP);
           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
           if (isR6) {
-            __ Beqzc(TMP, &done, /* is_bare */ true);
+            __ Beqzc(TMP, &done, /* is_bare= */ true);
             __ Move(dst_low, dst_high);
             __ Move(dst_high, ZERO);
           } else {
@@ -2631,7 +2631,7 @@
           __ Or(dst_high, dst_high, TMP);
           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
           if (isR6) {
-            __ Beqzc(TMP, &done, /* is_bare */ true);
+            __ Beqzc(TMP, &done, /* is_bare= */ true);
             __ Move(TMP, dst_high);
             __ Move(dst_high, dst_low);
             __ Move(dst_low, TMP);
@@ -2862,7 +2862,7 @@
                                                           obj,
                                                           offset,
                                                           temp,
-                                                          /* needs_null_check */ false);
+                                                          /* needs_null_check= */ false);
         } else {
           codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
                                                           out_loc,
@@ -2870,7 +2870,7 @@
                                                           data_offset,
                                                           index,
                                                           temp,
-                                                          /* needs_null_check */ false);
+                                                          /* needs_null_check= */ false);
         }
       } else {
         Register out = out_loc.AsRegister<Register>();
@@ -4104,7 +4104,7 @@
 
   int64_t magic;
   int shift;
-  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+  CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
 
   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
 
@@ -5948,7 +5948,7 @@
       nullptr : codegen_->GetLabelOf(true_successor);
   MipsLabel* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
       nullptr : codegen_->GetLabelOf(false_successor);
-  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
 }
 
 void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -5967,9 +5967,9 @@
   SlowPathCodeMIPS* slow_path =
       deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS>(deoptimize);
   GenerateTestAndBranch(deoptimize,
-                        /* condition_input_index */ 0,
+                        /* condition_input_index= */ 0,
                         slow_path->GetEntryLabel(),
-                        /* false_target */ nullptr);
+                        /* false_target= */ nullptr);
 }
 
 // This function returns true if a conditional move can be generated for HSelect.
@@ -5983,7 +5983,7 @@
 // of common logic.
 static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* locations_to_set) {
   bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
-  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+  HInstruction* cond = select->InputAt(/* i= */ 2);
   HCondition* condition = cond->AsCondition();
 
   DataType::Type cond_type =
@@ -6216,7 +6216,7 @@
   Location src = locations->InAt(1);
   Register src_reg = ZERO;
   Register src_reg_high = ZERO;
-  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+  HInstruction* cond = select->InputAt(/* i= */ 2);
   Register cond_reg = TMP;
   int cond_cc = 0;
   DataType::Type cond_type = DataType::Type::kInt32;
@@ -6224,7 +6224,7 @@
   DataType::Type dst_type = select->GetType();
 
   if (IsBooleanValueOrMaterializedCondition(cond)) {
-    cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
+    cond_reg = locations->InAt(/* at= */ 2).AsRegister<Register>();
   } else {
     HCondition* condition = cond->AsCondition();
     LocationSummary* cond_locations = cond->GetLocations();
@@ -6337,7 +6337,7 @@
   Location dst = locations->Out();
   Location false_src = locations->InAt(0);
   Location true_src = locations->InAt(1);
-  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+  HInstruction* cond = select->InputAt(/* i= */ 2);
   Register cond_reg = TMP;
   FRegister fcond_reg = FTMP;
   DataType::Type cond_type = DataType::Type::kInt32;
@@ -6345,7 +6345,7 @@
   DataType::Type dst_type = select->GetType();
 
   if (IsBooleanValueOrMaterializedCondition(cond)) {
-    cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
+    cond_reg = locations->InAt(/* at= */ 2).AsRegister<Register>();
   } else {
     HCondition* condition = cond->AsCondition();
     LocationSummary* cond_locations = cond->GetLocations();
@@ -6526,7 +6526,7 @@
 
 void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) {
   bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
-  if (CanMoveConditionally(select, is_r6, /* locations_to_set */ nullptr)) {
+  if (CanMoveConditionally(select, is_r6, /* locations_to_set= */ nullptr)) {
     if (is_r6) {
       GenConditionalMoveR6(select);
     } else {
@@ -6536,8 +6536,8 @@
     LocationSummary* locations = select->GetLocations();
     MipsLabel false_target;
     GenerateTestAndBranch(select,
-                          /* condition_input_index */ 2,
-                          /* true_target */ nullptr,
+                          /* condition_input_index= */ 2,
+                          /* true_target= */ nullptr,
                           &false_target);
     codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
     __ Bind(&false_target);
@@ -6696,7 +6696,7 @@
                                                         obj,
                                                         offset,
                                                         temp_loc,
-                                                        /* needs_null_check */ true);
+                                                        /* needs_null_check= */ true);
         if (is_volatile) {
           GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
         }
@@ -6929,7 +6929,7 @@
                                                       out_reg,
                                                       offset,
                                                       maybe_temp,
-                                                      /* needs_null_check */ false);
+                                                      /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // Save the value of `out` into `maybe_temp` before overwriting it
@@ -6970,7 +6970,7 @@
                                                       obj_reg,
                                                       offset,
                                                       maybe_temp,
-                                                      /* needs_null_check */ false);
+                                                      /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
@@ -7061,7 +7061,7 @@
           __ AddUpper(base, obj, offset_high);
         }
         MipsLabel skip_call;
-        __ Beqz(T9, &skip_call, /* is_bare */ true);
+        __ Beqz(T9, &skip_call, /* is_bare= */ true);
         if (label_low != nullptr) {
           DCHECK(short_offset);
           __ Bind(label_low);
@@ -7216,11 +7216,11 @@
     MipsLabel skip_call;
     if (short_offset) {
       if (isR6) {
-        __ Beqzc(T9, &skip_call, /* is_bare */ true);
+        __ Beqzc(T9, &skip_call, /* is_bare= */ true);
         __ Nop();  // In forbidden slot.
         __ Jialc(T9, thunk_disp);
       } else {
-        __ Beqz(T9, &skip_call, /* is_bare */ true);
+        __ Beqz(T9, &skip_call, /* is_bare= */ true);
         __ Addiu(T9, T9, thunk_disp);  // In delay slot.
         __ Jalr(T9);
         __ Nop();  // In delay slot.
@@ -7228,13 +7228,13 @@
       __ Bind(&skip_call);
     } else {
       if (isR6) {
-        __ Beqz(T9, &skip_call, /* is_bare */ true);
+        __ Beqz(T9, &skip_call, /* is_bare= */ true);
         __ Aui(base, obj, offset_high);  // In delay slot.
         __ Jialc(T9, thunk_disp);
         __ Bind(&skip_call);
       } else {
         __ Lui(base, offset_high);
-        __ Beqz(T9, &skip_call, /* is_bare */ true);
+        __ Beqz(T9, &skip_call, /* is_bare= */ true);
         __ Addiu(T9, T9, thunk_disp);  // In delay slot.
         __ Jalr(T9);
         __ Bind(&skip_call);
@@ -7311,7 +7311,7 @@
     // We will not do the explicit null check in the thunk as some form of a null check
     // must've been done earlier.
     DCHECK(!needs_null_check);
-    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false);
+    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset= */ false);
     // Loading the entrypoint does not require a load acquire since it is only changed when
     // threads are suspended or running a checkpoint.
     __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
@@ -7321,13 +7321,13 @@
         : index.AsRegister<Register>();
     MipsLabel skip_call;
     if (GetInstructionSetFeatures().IsR6()) {
-      __ Beqz(T9, &skip_call, /* is_bare */ true);
+      __ Beqz(T9, &skip_call, /* is_bare= */ true);
       __ Lsa(TMP, index_reg, obj, scale_factor);  // In delay slot.
       __ Jialc(T9, thunk_disp);
       __ Bind(&skip_call);
     } else {
       __ Sll(TMP, index_reg, scale_factor);
-      __ Beqz(T9, &skip_call, /* is_bare */ true);
+      __ Beqz(T9, &skip_call, /* is_bare= */ true);
       __ Addiu(T9, T9, thunk_disp);  // In delay slot.
       __ Jalr(T9);
       __ Bind(&skip_call);
@@ -7442,7 +7442,7 @@
         ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
                                                   ref,
                                                   obj,
-                                                  /* field_offset */ index,
+                                                  /* field_offset= */ index,
                                                   temp_reg);
   } else {
     slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
@@ -7705,7 +7705,7 @@
                                         kWithoutReadBarrier);
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ Bne(out, cls.AsRegister<Register>(), slow_path->GetEntryLabel());
       __ LoadConst32(out, 1);
@@ -7734,7 +7734,7 @@
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ B(slow_path->GetEntryLabel());
       break;
@@ -8001,7 +8001,7 @@
           NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high);
       Register temp_reg = temp.AsRegister<Register>();
       EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
-      __ Addiu(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+      __ Addiu(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
@@ -8010,7 +8010,7 @@
       PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
       Register temp_reg = temp.AsRegister<Register>();
       EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
-      __ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+      __ Lw(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
@@ -8020,7 +8020,7 @@
           MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high);
       Register temp_reg = temp.AsRegister<Register>();
       EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
-      __ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+      __ Lw(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
@@ -8226,7 +8226,7 @@
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
                                                      out,
                                                      base_or_current_method_reg);
-      __ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label);
+      __ Addiu(out, out, /* imm16= */ 0x5678, &info_low->label);
       break;
     }
     case HLoadClass::LoadKind::kBootImageRelRo: {
@@ -8239,7 +8239,7 @@
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
                                                      out,
                                                      base_or_current_method_reg);
-      __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label);
+      __ Lw(out, out, /* imm16= */ 0x5678, &info_low->label);
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
@@ -8253,7 +8253,7 @@
       GenerateGcRootFieldLoad(cls,
                               out_loc,
                               out,
-                              /* placeholder */ 0x5678,
+                              /* offset= */ 0x5678,
                               read_barrier_option,
                               &info_low->label);
       generate_null_check = true;
@@ -8278,12 +8278,12 @@
                                                                              cls->GetClass());
       bool reordering = __ SetReorder(false);
       __ Bind(&info->high_label);
-      __ Lui(out, /* placeholder */ 0x1234);
+      __ Lui(out, /* imm16= */ 0x1234);
       __ SetReorder(reordering);
       GenerateGcRootFieldLoad(cls,
                               out_loc,
                               out,
-                              /* placeholder */ 0x5678,
+                              /* offset= */ 0x5678,
                               read_barrier_option,
                               &info->low_label);
       break;
@@ -8432,7 +8432,7 @@
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
                                                      out,
                                                      base_or_current_method_reg);
-      __ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label);
+      __ Addiu(out, out, /* imm16= */ 0x5678, &info_low->label);
       return;
     }
     case HLoadString::LoadKind::kBootImageRelRo: {
@@ -8445,7 +8445,7 @@
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
                                                      out,
                                                      base_or_current_method_reg);
-      __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label);
+      __ Lw(out, out, /* imm16= */ 0x5678, &info_low->label);
       return;
     }
     case HLoadString::LoadKind::kBssEntry: {
@@ -8460,7 +8460,7 @@
       GenerateGcRootFieldLoad(load,
                               out_loc,
                               out,
-                              /* placeholder */ 0x5678,
+                              /* offset= */ 0x5678,
                               kCompilerReadBarrierOption,
                               &info_low->label);
       SlowPathCodeMIPS* slow_path =
@@ -8489,12 +8489,12 @@
                                           load->GetString());
       bool reordering = __ SetReorder(false);
       __ Bind(&info->high_label);
-      __ Lui(out, /* placeholder */ 0x1234);
+      __ Lui(out, /* imm16= */ 0x1234);
       __ SetReorder(reordering);
       GenerateGcRootFieldLoad(load,
                               out_loc,
                               out,
-                              /* placeholder */ 0x5678,
+                              /* offset= */ 0x5678,
                               kCompilerReadBarrierOption,
                               &info->low_label);
       return;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 016aac7..8b6328f 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -953,7 +953,7 @@
     : CodeGenerator(graph,
                     kNumberOfGpuRegisters,
                     kNumberOfFpuRegisters,
-                    /* number_of_register_pairs */ 0,
+                    /* number_of_register_pairs= */ 0,
                     ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                         arraysize(kCoreCalleeSaves)),
                     ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
@@ -1581,14 +1581,14 @@
     uint32_t intrinsic_data,
     const PcRelativePatchInfo* info_high) {
   return NewPcRelativePatch(
-      /* dex_file */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
+      /* dex_file= */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
 }
 
 CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageRelRoPatch(
     uint32_t boot_image_offset,
     const PcRelativePatchInfo* info_high) {
   return NewPcRelativePatch(
-      /* dex_file */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
+      /* dex_file= */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
 }
 
 CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageMethodPatch(
@@ -1665,7 +1665,7 @@
   DCHECK(!info_high->patch_info_high);
   __ Bind(&info_high->label);
   // Add the high half of a 32-bit offset to PC.
-  __ Auipc(out, /* placeholder */ 0x1234);
+  __ Auipc(out, /* imm16= */ 0x1234);
   // A following instruction will add the sign-extended low half of the 32-bit
   // offset to `out` (e.g. ld, jialc, daddiu).
   if (info_low != nullptr) {
@@ -1679,13 +1679,13 @@
     PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference);
     PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
     EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-    __ Daddiu(reg, AT, /* placeholder */ 0x5678);
+    __ Daddiu(reg, AT, /* imm16= */ 0x5678);
   } else if (GetCompilerOptions().GetCompilePic()) {
     PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
     PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
     EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
     // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
-    __ Lwu(reg, AT, /* placeholder */ 0x5678);
+    __ Lwu(reg, AT, /* imm16= */ 0x5678);
   } else {
     DCHECK(Runtime::Current()->UseJitCompilation());
     gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1710,7 +1710,7 @@
     PcRelativePatchInfo* info_low =
         NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high);
     EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-    __ Daddiu(argument, AT, /* placeholder */ 0x5678);
+    __ Daddiu(argument, AT, /* imm16= */ 0x5678);
   } else {
     LoadBootImageAddress(argument, boot_image_offset);
   }
@@ -1724,7 +1724,7 @@
   ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
   return jit_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
-      [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+      [this]() { return __ NewLiteral<uint32_t>(/* value= */ 0u); });
 }
 
 Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file,
@@ -1733,7 +1733,7 @@
   ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
   return jit_class_patches_.GetOrCreate(
       TypeReference(&dex_file, type_index),
-      [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+      [this]() { return __ NewLiteral<uint32_t>(/* value= */ 0u); });
 }
 
 void CodeGeneratorMIPS64::PatchJitRootUse(uint8_t* code,
@@ -2458,7 +2458,7 @@
                                                           obj,
                                                           offset,
                                                           temp,
-                                                          /* needs_null_check */ false);
+                                                          /* needs_null_check= */ false);
         } else {
           codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
                                                           out_loc,
@@ -2466,7 +2466,7 @@
                                                           data_offset,
                                                           index,
                                                           temp,
-                                                          /* needs_null_check */ false);
+                                                          /* needs_null_check= */ false);
         }
       } else {
         GpuRegister out = out_loc.AsRegister<GpuRegister>();
@@ -3337,10 +3337,10 @@
   switch (type) {
     default:
       // Integer case.
-      GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ false, locations);
+      GenerateIntLongCompare(instruction->GetCondition(), /* is64bit= */ false, locations);
       return;
     case DataType::Type::kInt64:
-      GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ true, locations);
+      GenerateIntLongCompare(instruction->GetCondition(), /* is64bit= */ true, locations);
       return;
     case DataType::Type::kFloat32:
     case DataType::Type::kFloat64:
@@ -4449,10 +4449,10 @@
 
     switch (type) {
       default:
-        GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ false, locations, branch_target);
+        GenerateIntLongCompareAndBranch(if_cond, /* is64bit= */ false, locations, branch_target);
         break;
       case DataType::Type::kInt64:
-        GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ true, locations, branch_target);
+        GenerateIntLongCompareAndBranch(if_cond, /* is64bit= */ true, locations, branch_target);
         break;
       case DataType::Type::kFloat32:
       case DataType::Type::kFloat64:
@@ -4482,7 +4482,7 @@
       nullptr : codegen_->GetLabelOf(true_successor);
   Mips64Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
       nullptr : codegen_->GetLabelOf(false_successor);
-  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
 }
 
 void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -4501,9 +4501,9 @@
   SlowPathCodeMIPS64* slow_path =
       deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS64>(deoptimize);
   GenerateTestAndBranch(deoptimize,
-                        /* condition_input_index */ 0,
+                        /* condition_input_index= */ 0,
                         slow_path->GetEntryLabel(),
-                        /* false_target */ nullptr);
+                        /* false_target= */ nullptr);
 }
 
 // This function returns true if a conditional move can be generated for HSelect.
@@ -4517,7 +4517,7 @@
 // of common logic.
 static bool CanMoveConditionally(HSelect* select, LocationSummary* locations_to_set) {
   bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
-  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+  HInstruction* cond = select->InputAt(/* i= */ 2);
   HCondition* condition = cond->AsCondition();
 
   DataType::Type cond_type =
@@ -4660,7 +4660,7 @@
   Location dst = locations->Out();
   Location false_src = locations->InAt(0);
   Location true_src = locations->InAt(1);
-  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+  HInstruction* cond = select->InputAt(/* i= */ 2);
   GpuRegister cond_reg = TMP;
   FpuRegister fcond_reg = FTMP;
   DataType::Type cond_type = DataType::Type::kInt32;
@@ -4668,7 +4668,7 @@
   DataType::Type dst_type = select->GetType();
 
   if (IsBooleanValueOrMaterializedCondition(cond)) {
-    cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<GpuRegister>();
+    cond_reg = locations->InAt(/* at= */ 2).AsRegister<GpuRegister>();
   } else {
     HCondition* condition = cond->AsCondition();
     LocationSummary* cond_locations = cond->GetLocations();
@@ -4677,13 +4677,13 @@
     switch (cond_type) {
       default:
         cond_inverted = MaterializeIntLongCompare(if_cond,
-                                                  /* is64bit */ false,
+                                                  /* is64bit= */ false,
                                                   cond_locations,
                                                   cond_reg);
         break;
       case DataType::Type::kInt64:
         cond_inverted = MaterializeIntLongCompare(if_cond,
-                                                  /* is64bit */ true,
+                                                  /* is64bit= */ true,
                                                   cond_locations,
                                                   cond_reg);
         break;
@@ -4826,14 +4826,14 @@
 }
 
 void InstructionCodeGeneratorMIPS64::VisitSelect(HSelect* select) {
-  if (CanMoveConditionally(select, /* locations_to_set */ nullptr)) {
+  if (CanMoveConditionally(select, /* locations_to_set= */ nullptr)) {
     GenConditionalMove(select);
   } else {
     LocationSummary* locations = select->GetLocations();
     Mips64Label false_target;
     GenerateTestAndBranch(select,
-                          /* condition_input_index */ 2,
-                          /* true_target */ nullptr,
+                          /* condition_input_index= */ 2,
+                          /* true_target= */ nullptr,
                           &false_target);
     codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
     __ Bind(&false_target);
@@ -4945,7 +4945,7 @@
                                                         obj,
                                                         offset,
                                                         temp_loc,
-                                                        /* needs_null_check */ true);
+                                                        /* needs_null_check= */ true);
         if (is_volatile) {
           GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
         }
@@ -5101,7 +5101,7 @@
                                                       out_reg,
                                                       offset,
                                                       maybe_temp,
-                                                      /* needs_null_check */ false);
+                                                      /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // Save the value of `out` into `maybe_temp` before overwriting it
@@ -5142,7 +5142,7 @@
                                                       obj_reg,
                                                       offset,
                                                       maybe_temp,
-                                                      /* needs_null_check */ false);
+                                                      /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
@@ -5230,7 +5230,7 @@
           __ Daui(base, obj, offset_high);
         }
         Mips64Label skip_call;
-        __ Beqz(T9, &skip_call, /* is_bare */ true);
+        __ Beqz(T9, &skip_call, /* is_bare= */ true);
         if (label_low != nullptr) {
           DCHECK(short_offset);
           __ Bind(label_low);
@@ -5360,7 +5360,7 @@
     GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
     Mips64Label skip_call;
     if (short_offset) {
-      __ Beqzc(T9, &skip_call, /* is_bare */ true);
+      __ Beqzc(T9, &skip_call, /* is_bare= */ true);
       __ Nop();  // In forbidden slot.
       __ Jialc(T9, thunk_disp);
       __ Bind(&skip_call);
@@ -5369,7 +5369,7 @@
     } else {
       int16_t offset_low = Low16Bits(offset);
       int16_t offset_high = High16Bits(offset - offset_low);  // Accounts for sign extension in lwu.
-      __ Beqz(T9, &skip_call, /* is_bare */ true);
+      __ Beqz(T9, &skip_call, /* is_bare= */ true);
       __ Daui(TMP, obj, offset_high);  // In delay slot.
       __ Jialc(T9, thunk_disp);
       __ Bind(&skip_call);
@@ -5442,12 +5442,12 @@
     // We will not do the explicit null check in the thunk as some form of a null check
     // must've been done earlier.
     DCHECK(!needs_null_check);
-    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false);
+    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset= */ false);
     // Loading the entrypoint does not require a load acquire since it is only changed when
     // threads are suspended or running a checkpoint.
     __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
     Mips64Label skip_call;
-    __ Beqz(T9, &skip_call, /* is_bare */ true);
+    __ Beqz(T9, &skip_call, /* is_bare= */ true);
     GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
     GpuRegister index_reg = index.AsRegister<GpuRegister>();
     __ Dlsa(TMP, index_reg, obj, scale_factor);  // In delay slot.
@@ -5558,7 +5558,7 @@
         ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
                                                     ref,
                                                     obj,
-                                                    /* field_offset */ index,
+                                                    /* field_offset= */ index,
                                                     temp_reg);
   } else {
     slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
@@ -5821,7 +5821,7 @@
                                         kWithoutReadBarrier);
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ Bnec(out, cls.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
       __ LoadConst32(out, 1);
@@ -5850,7 +5850,7 @@
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ Bc(slow_path->GetEntryLabel());
       break;
@@ -6092,7 +6092,7 @@
       CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
           NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high);
       EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+      __ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
@@ -6101,7 +6101,7 @@
       PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
       EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
       // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
-      __ Lwu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+      __ Lwu(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
@@ -6110,7 +6110,7 @@
       PcRelativePatchInfo* info_low = NewMethodBssEntryPatch(
           MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high);
       EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Ld(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+      __ Ld(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
@@ -6280,7 +6280,7 @@
       CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
           codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex(), info_high);
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Daddiu(out, AT, /* placeholder */ 0x5678);
+      __ Daddiu(out, AT, /* imm16= */ 0x5678);
       break;
     }
     case HLoadClass::LoadKind::kBootImageRelRo: {
@@ -6291,7 +6291,7 @@
       CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
           codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Lwu(out, AT, /* placeholder */ 0x5678);
+      __ Lwu(out, AT, /* imm16= */ 0x5678);
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
@@ -6303,7 +6303,7 @@
       GenerateGcRootFieldLoad(cls,
                               out_loc,
                               out,
-                              /* placeholder */ 0x5678,
+                              /* offset= */ 0x5678,
                               read_barrier_option,
                               &info_low->label);
       generate_null_check = true;
@@ -6427,7 +6427,7 @@
       CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
           codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Daddiu(out, AT, /* placeholder */ 0x5678);
+      __ Daddiu(out, AT, /* imm16= */ 0x5678);
       return;
     }
     case HLoadString::LoadKind::kBootImageRelRo: {
@@ -6438,7 +6438,7 @@
       CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
           codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Lwu(out, AT, /* placeholder */ 0x5678);
+      __ Lwu(out, AT, /* imm16= */ 0x5678);
       return;
     }
     case HLoadString::LoadKind::kBssEntry: {
@@ -6451,7 +6451,7 @@
       GenerateGcRootFieldLoad(load,
                               out_loc,
                               out,
-                              /* placeholder */ 0x5678,
+                              /* offset= */ 0x5678,
                               kCompilerReadBarrierOption,
                               &info_low->label);
       SlowPathCodeMIPS64* slow_path =
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index 09e96cc..4e9ba0d 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -74,19 +74,19 @@
       __ InsertW(static_cast<VectorRegister>(FTMP),
                  locations->InAt(0).AsRegisterPairHigh<Register>(),
                  1);
-      __ ReplicateFPToVectorRegister(dst, FTMP, /* is_double */ true);
+      __ ReplicateFPToVectorRegister(dst, FTMP, /* is_double= */ true);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
       __ ReplicateFPToVectorRegister(dst,
                                      locations->InAt(0).AsFpuRegister<FRegister>(),
-                                     /* is_double */ false);
+                                     /* is_double= */ false);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
       __ ReplicateFPToVectorRegister(dst,
                                      locations->InAt(0).AsFpuRegister<FRegister>(),
-                                     /* is_double */ true);
+                                     /* is_double= */ true);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -1344,7 +1344,7 @@
 }
 
 void LocationsBuilderMIPS::VisitVecLoad(HVecLoad* instruction) {
-  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
+  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ true);
 }
 
 void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) {
@@ -1387,7 +1387,7 @@
 }
 
 void LocationsBuilderMIPS::VisitVecStore(HVecStore* instruction) {
-  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
+  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ false);
 }
 
 void InstructionCodeGeneratorMIPS::VisitVecStore(HVecStore* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index b6873b1..6467d3e 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -79,13 +79,13 @@
       DCHECK_EQ(4u, instruction->GetVectorLength());
       __ ReplicateFPToVectorRegister(dst,
                                      locations->InAt(0).AsFpuRegister<FpuRegister>(),
-                                     /* is_double */ false);
+                                     /* is_double= */ false);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
       __ ReplicateFPToVectorRegister(dst,
                                      locations->InAt(0).AsFpuRegister<FpuRegister>(),
-                                     /* is_double */ true);
+                                     /* is_double= */ true);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -1342,7 +1342,7 @@
 }
 
 void LocationsBuilderMIPS64::VisitVecLoad(HVecLoad* instruction) {
-  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
+  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ true);
 }
 
 void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) {
@@ -1385,7 +1385,7 @@
 }
 
 void LocationsBuilderMIPS64::VisitVecStore(HVecStore* instruction) {
-  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
+  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ false);
 }
 
 void InstructionCodeGeneratorMIPS64::VisitVecStore(HVecStore* instruction) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1b74d22..766ff78 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1720,7 +1720,7 @@
       nullptr : codegen_->GetLabelOf(true_successor);
   Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
       nullptr : codegen_->GetLabelOf(false_successor);
-  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
 }
 
 void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -1738,9 +1738,9 @@
 void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) {
   SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86>(deoptimize);
   GenerateTestAndBranch<Label>(deoptimize,
-                               /* condition_input_index */ 0,
+                               /* condition_input_index= */ 0,
                                slow_path->GetEntryLabel(),
-                               /* false_target */ nullptr);
+                               /* false_target= */ nullptr);
 }
 
 void LocationsBuilderX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -1863,7 +1863,7 @@
   } else {
     NearLabel false_target;
     GenerateTestAndBranch<NearLabel>(
-        select, /* condition_input_index */ 2, /* true_target */ nullptr, &false_target);
+        select, /* condition_input_index= */ 2, /* true_target= */ nullptr, &false_target);
     codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
     __ Bind(&false_target);
   }
@@ -3434,8 +3434,8 @@
 
   // Load the values to the FP stack in reverse order, using temporaries if needed.
   const bool is_wide = !is_float;
-  PushOntoFPStack(second, elem_size, 2 * elem_size, /* is_fp */ true, is_wide);
-  PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp */ true, is_wide);
+  PushOntoFPStack(second, elem_size, 2 * elem_size, /* is_fp= */ true, is_wide);
+  PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp= */ true, is_wide);
 
   // Loop doing FPREM until we stabilize.
   NearLabel retry;
@@ -3572,7 +3572,7 @@
 
   int64_t magic;
   int shift;
-  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+  CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
 
   // Save the numerator.
   __ movl(num, eax);
@@ -4801,7 +4801,7 @@
     }
     case MemBarrierKind::kNTStoreStore:
       // Non-Temporal Store/Store needs an explicit fence.
-      MemoryFence(/* non-temporal */ true);
+      MemoryFence(/* non_temporal= */ true);
       break;
   }
 }
@@ -4936,14 +4936,14 @@
 void CodeGeneratorX86::RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address,
                                                      uint32_t intrinsic_data) {
   boot_image_intrinsic_patches_.emplace_back(
-      method_address, /* target_dex_file */ nullptr, intrinsic_data);
+      method_address, /* target_dex_file= */ nullptr, intrinsic_data);
   __ Bind(&boot_image_intrinsic_patches_.back().label);
 }
 
 void CodeGeneratorX86::RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address,
                                                  uint32_t boot_image_offset) {
   boot_image_method_patches_.emplace_back(
-      method_address, /* target_dex_file */ nullptr, boot_image_offset);
+      method_address, /* target_dex_file= */ nullptr, boot_image_offset);
   __ Bind(&boot_image_method_patches_.back().label);
 }
 
@@ -5237,7 +5237,7 @@
         // Note that a potential implicit null check is handled in this
         // CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier call.
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            instruction, out, base, offset, /* needs_null_check */ true);
+            instruction, out, base, offset, /* needs_null_check= */ true);
         if (is_volatile) {
           codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
         }
@@ -5720,7 +5720,7 @@
         // Note that a potential implicit null check is handled in this
         // CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier call.
         codegen_->GenerateArrayLoadWithBakerReadBarrier(
-            instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
+            instruction, out_loc, obj, data_offset, index, /* needs_null_check= */ true);
       } else {
         Register out = out_loc.AsRegister<Register>();
         __ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
@@ -6582,7 +6582,7 @@
           cls,
           out_loc,
           Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
-          /* fixup_label */ nullptr,
+          /* fixup_label= */ nullptr,
           read_barrier_option);
       break;
     }
@@ -7109,7 +7109,7 @@
       }
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ j(kNotEqual, slow_path->GetEntryLabel());
       __ movl(out, Immediate(1));
@@ -7141,7 +7141,7 @@
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ jmp(slow_path->GetEntryLabel());
       if (zero.IsLinked()) {
@@ -7650,7 +7650,7 @@
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(out + offset)
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          instruction, out, out_reg, offset, /* needs_null_check */ false);
+          instruction, out, out_reg, offset, /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // Save the value of `out` into `maybe_temp` before overwriting it
@@ -7684,7 +7684,7 @@
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          instruction, out, obj_reg, offset, /* needs_null_check */ false);
+          instruction, out, obj_reg, offset, /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
@@ -7733,7 +7733,7 @@
 
       // Slow path marking the GC root `root`.
       SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
-          instruction, root, /* unpoison_ref_before_marking */ false);
+          instruction, root, /* unpoison_ref_before_marking= */ false);
       codegen_->AddSlowPath(slow_path);
 
       // Test the entrypoint (`Thread::Current()->pReadBarrierMarkReg ## root.reg()`).
@@ -7863,10 +7863,10 @@
   if (always_update_field) {
     DCHECK(temp != nullptr);
     slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
-        instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp);
+        instruction, ref, obj, src, /* unpoison_ref_before_marking= */ true, *temp);
   } else {
     slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
-        instruction, ref, /* unpoison_ref_before_marking */ true);
+        instruction, ref, /* unpoison_ref_before_marking= */ true);
   }
   AddSlowPath(slow_path);
 
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 781f272..67a2aa5 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -992,7 +992,7 @@
       // temp = thread->string_init_entrypoint
       uint32_t offset =
           GetThreadOffset<kX86_64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
-      __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip */ true));
+      __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip= */ true));
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
@@ -1001,19 +1001,19 @@
     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative:
       DCHECK(GetCompilerOptions().IsBootImage());
       __ leal(temp.AsRegister<CpuRegister>(),
-              Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+              Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
       RecordBootImageMethodPatch(invoke);
       break;
     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
       // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
       __ movl(temp.AsRegister<CpuRegister>(),
-              Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+              Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
       RecordBootImageRelRoPatch(GetBootImageOffset(invoke));
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
       __ movq(temp.AsRegister<CpuRegister>(),
-              Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+              Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
       RecordMethodBssEntryPatch(invoke);
       break;
     }
@@ -1076,12 +1076,12 @@
 }
 
 void CodeGeneratorX86_64::RecordBootImageIntrinsicPatch(uint32_t intrinsic_data) {
-  boot_image_intrinsic_patches_.emplace_back(/* target_dex_file */ nullptr, intrinsic_data);
+  boot_image_intrinsic_patches_.emplace_back(/* target_dex_file= */ nullptr, intrinsic_data);
   __ Bind(&boot_image_intrinsic_patches_.back().label);
 }
 
 void CodeGeneratorX86_64::RecordBootImageRelRoPatch(uint32_t boot_image_offset) {
-  boot_image_method_patches_.emplace_back(/* target_dex_file */ nullptr, boot_image_offset);
+  boot_image_method_patches_.emplace_back(/* target_dex_file= */ nullptr, boot_image_offset);
   __ Bind(&boot_image_method_patches_.back().label);
 }
 
@@ -1123,10 +1123,10 @@
 
 void CodeGeneratorX86_64::LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_reference) {
   if (GetCompilerOptions().IsBootImage()) {
-    __ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+    __ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
     RecordBootImageIntrinsicPatch(boot_image_reference);
   } else if (GetCompilerOptions().GetCompilePic()) {
-    __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+    __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
     RecordBootImageRelRoPatch(boot_image_reference);
   } else {
     DCHECK(Runtime::Current()->UseJitCompilation());
@@ -1146,7 +1146,7 @@
     DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
     // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
     __ leal(argument,
-            Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+            Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
     MethodReference target_method = invoke->GetTargetMethod();
     dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
     boot_image_type_patches_.emplace_back(target_method.dex_file, type_idx.index_);
@@ -1277,7 +1277,7 @@
 }
 
 void CodeGeneratorX86_64::GenerateInvokeRuntime(int32_t entry_point_offset) {
-  __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip */ true));
+  __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip= */ true));
 }
 
 static constexpr int kNumberOfCpuRegisterPairs = 0;
@@ -1799,7 +1799,7 @@
       nullptr : codegen_->GetLabelOf(true_successor);
   Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
       nullptr : codegen_->GetLabelOf(false_successor);
-  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
 }
 
 void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -1817,9 +1817,9 @@
 void InstructionCodeGeneratorX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
   SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86_64>(deoptimize);
   GenerateTestAndBranch<Label>(deoptimize,
-                               /* condition_input_index */ 0,
+                               /* condition_input_index= */ 0,
                                slow_path->GetEntryLabel(),
-                               /* false_target */ nullptr);
+                               /* false_target= */ nullptr);
 }
 
 void LocationsBuilderX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -1922,8 +1922,8 @@
   } else {
     NearLabel false_target;
     GenerateTestAndBranch<NearLabel>(select,
-                                     /* condition_input_index */ 2,
-                                     /* true_target */ nullptr,
+                                     /* condition_input_index= */ 2,
+                                     /* true_target= */ nullptr,
                                      &false_target);
     codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
     __ Bind(&false_target);
@@ -3679,7 +3679,7 @@
   if (instruction->GetResultType() == DataType::Type::kInt32) {
     int imm = second.GetConstant()->AsIntConstant()->GetValue();
 
-    CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+    CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
 
     __ movl(numerator, eax);
 
@@ -3716,7 +3716,7 @@
     CpuRegister rax = eax;
     CpuRegister rdx = edx;
 
-    CalculateMagicAndShiftForDivRem(imm, true /* is_long */, &magic, &shift);
+    CalculateMagicAndShiftForDivRem(imm, /* is_long= */ true, &magic, &shift);
 
     // Save the numerator.
     __ movq(numerator, rax);
@@ -4554,7 +4554,7 @@
     }
     case MemBarrierKind::kNTStoreStore:
       // Non-Temporal Store/Store needs an explicit fence.
-      MemoryFence(/* non-temporal */ true);
+      MemoryFence(/* non_temporal= */ true);
       break;
   }
 }
@@ -4631,7 +4631,7 @@
         // Note that a potential implicit null check is handled in this
         // CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier call.
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            instruction, out, base, offset, /* needs_null_check */ true);
+            instruction, out, base, offset, /* needs_null_check= */ true);
         if (is_volatile) {
           codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
         }
@@ -5086,7 +5086,7 @@
         // Note that a potential implicit null check is handled in this
         // CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier call.
         codegen_->GenerateArrayLoadWithBakerReadBarrier(
-            instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
+            instruction, out_loc, obj, data_offset, index, /* needs_null_check= */ true);
       } else {
         CpuRegister out = out_loc.AsRegister<CpuRegister>();
         __ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
@@ -5486,7 +5486,7 @@
   }
   // Load the address of the card table into `card`.
   __ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64PointerSize>().Int32Value(),
-                                        /* no_rip */ true));
+                                        /* no_rip= */ true));
   // Calculate the offset (in the card table) of the card corresponding to
   // `object`.
   __ movq(temp, object);
@@ -5566,7 +5566,7 @@
   }
 
   __ gs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64PointerSize>().Int32Value(),
-                                  /* no_rip */ true),
+                                  /* no_rip= */ true),
                 Immediate(0));
   if (successor == nullptr) {
     __ j(kNotEqual, slow_path->GetEntryLabel());
@@ -5948,25 +5948,25 @@
           cls,
           out_loc,
           Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
-          /* fixup_label */ nullptr,
+          /* fixup_label= */ nullptr,
           read_barrier_option);
       break;
     }
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
-      __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+      __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
       codegen_->RecordBootImageTypePatch(cls);
       break;
     case HLoadClass::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
-      __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+      __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
       codegen_->RecordBootImageRelRoPatch(codegen_->GetBootImageOffset(cls));
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
       Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
-                                          /* no_rip */ false);
+                                          /* no_rip= */ false);
       Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls);
       // /* GcRoot<mirror::Class> */ out = *address  /* PC-relative */
       GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
@@ -5982,7 +5982,7 @@
     }
     case HLoadClass::LoadKind::kJitTableAddress: {
       Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
-                                          /* no_rip */ true);
+                                          /* no_rip= */ true);
       Label* fixup_label =
           codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
       // /* GcRoot<mirror::Class> */ out = *address
@@ -6107,19 +6107,19 @@
   switch (load->GetLoadKind()) {
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
-      __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+      __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
       codegen_->RecordBootImageStringPatch(load);
       return;
     }
     case HLoadString::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
-      __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+      __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
       codegen_->RecordBootImageRelRoPatch(codegen_->GetBootImageOffset(load));
       return;
     }
     case HLoadString::LoadKind::kBssEntry: {
       Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
-                                          /* no_rip */ false);
+                                          /* no_rip= */ false);
       Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
       // /* GcRoot<mirror::Class> */ out = *address  /* PC-relative */
       GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
@@ -6138,7 +6138,7 @@
     }
     case HLoadString::LoadKind::kJitTableAddress: {
       Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
-                                          /* no_rip */ true);
+                                          /* no_rip= */ true);
       Label* fixup_label = codegen_->NewJitRootStringPatch(
           load->GetDexFile(), load->GetStringIndex(), load->GetString());
       // /* GcRoot<mirror::String> */ out = *address
@@ -6160,7 +6160,7 @@
 
 static Address GetExceptionTlsAddress() {
   return Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>().Int32Value(),
-                           /* no_rip */ true);
+                           /* no_rip= */ true);
 }
 
 void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
@@ -6435,7 +6435,7 @@
       }
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ j(kNotEqual, slow_path->GetEntryLabel());
       __ movl(out, Immediate(1));
@@ -6467,7 +6467,7 @@
       // This should also be beneficial for the other cases above.
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
-          instruction, /* is_fatal */ false);
+          instruction, /* is_fatal= */ false);
       codegen_->AddSlowPath(slow_path);
       __ jmp(slow_path->GetEntryLabel());
       if (zero.IsLinked()) {
@@ -6954,7 +6954,7 @@
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(out + offset)
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          instruction, out, out_reg, offset, /* needs_null_check */ false);
+          instruction, out, out_reg, offset, /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // Save the value of `out` into `maybe_temp` before overwriting it
@@ -6988,7 +6988,7 @@
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          instruction, out, obj_reg, offset, /* needs_null_check */ false);
+          instruction, out, obj_reg, offset, /* needs_null_check= */ false);
     } else {
       // Load with slow path based read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
@@ -7037,13 +7037,13 @@
 
       // Slow path marking the GC root `root`.
       SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
-          instruction, root, /* unpoison_ref_before_marking */ false);
+          instruction, root, /* unpoison_ref_before_marking= */ false);
       codegen_->AddSlowPath(slow_path);
 
       // Test the `Thread::Current()->pReadBarrierMarkReg ## root.reg()` entrypoint.
       const int32_t entry_point_offset =
           Thread::ReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(root.reg());
-      __ gs()->cmpl(Address::Absolute(entry_point_offset, /* no_rip */ true), Immediate(0));
+      __ gs()->cmpl(Address::Absolute(entry_point_offset, /* no_rip= */ true), Immediate(0));
       // The entrypoint is null when the GC is not marking.
       __ j(kNotEqual, slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
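As the surrounding comments explain, the gs-relative compare is the whole fast path of this GC-root read barrier: the per-thread mark entrypoint slot is null whenever the GC is not marking. A rough C++ rendering of that shape, with the TLS layout and names invented purely for illustration:

    #include <atomic>

    using MarkFn = void* (*)(void*);

    // Stand-in for the per-thread entrypoint table reached via gs: on x86-64.
    struct ThreadTls {
      std::atomic<MarkFn> read_barrier_mark_reg[16];  // one slot per register
    };

    // Fast path of a GC-root load with a Baker-style read barrier: load the
    // root, then take the slow path only if the thread's mark entrypoint for
    // this register is non-null (i.e. the GC is currently marking).
    void* LoadGcRoot(ThreadTls* self, void** root_slot, int reg) {
      void* ref = *root_slot;
      MarkFn mark = self->read_barrier_mark_reg[reg].load(std::memory_order_relaxed);
      if (mark != nullptr) {   // cmpl gs:[entry_point_offset], 0 ; jne slow_path
        ref = mark(ref);       // slow path: mark/forward the reference
      }
      return ref;
    }

    int main() {
      ThreadTls tls{};  // all slots null: GC not marking
      void* obj = &tls;
      void* root = obj;
      return LoadGcRoot(&tls, &root, 0) == obj ? 0 : 1;
    }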
@@ -7169,10 +7169,10 @@
     DCHECK(temp1 != nullptr);
     DCHECK(temp2 != nullptr);
     slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
-        instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2);
+        instruction, ref, obj, src, /* unpoison_ref_before_marking= */ true, *temp1, *temp2);
   } else {
     slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
-        instruction, ref, /* unpoison_ref_before_marking */ true);
+        instruction, ref, /* unpoison_ref_before_marking= */ true);
   }
   AddSlowPath(slow_path);
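Every hunk in this file is the same mechanical migration: the argument comment `/* param */ value` becomes `/* param= */ value`. The `=`-suffixed form is the one Clang tooling such as clang-tidy's bugprone-argument-comment check recognizes and verifies against the callee's declared parameter names. A minimal standalone sketch of the convention (the Resize function is made up for illustration, not ART code):

    #include <iostream>

    // Hypothetical helper used only to illustrate the comment style.
    void Resize(int width, int height, bool keep_aspect_ratio) {
      std::cout << width << "x" << height
                << (keep_aspect_ratio ? " (aspect kept)" : "") << "\n";
    }

    int main() {
      // Old style: free-form prose that no tool validates.
      Resize(640, 480, /* keep aspect */ true);
      // New style: /* name= */ must match the declared parameter, so a
      // checker can flag mismatches such as /* keep_ratio= */ true.
      Resize(/* width= */ 640, /* height= */ 480, /* keep_aspect_ratio= */ true);
      return 0;
    }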
 
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index d6c9755..f406983 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -180,7 +180,7 @@
   DCHECK(!instruction->IsPhi());  // Makes no sense for Phi.
 
   // Find the target block.
-  CommonDominator finder(/* start_block */ nullptr);
+  CommonDominator finder(/* block= */ nullptr);
   for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
     HInstruction* user = use.GetUser();
     if (!(filter && ShouldFilterUse(instruction, user, post_dominated))) {
@@ -259,12 +259,12 @@
 
   size_t number_of_instructions = graph_->GetCurrentInstructionId();
   ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
-  ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false);
+  ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable= */ false);
   processed_instructions.ClearAllBits();
-  ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false);
+  ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable= */ false);
   post_dominated.ClearAllBits();
   ArenaBitVector instructions_that_can_move(
-      &allocator, number_of_instructions, /* expandable */ false);
+      &allocator, number_of_instructions, /* expandable= */ false);
   instructions_that_can_move.ClearAllBits();
   ScopedArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
 
@@ -414,7 +414,7 @@
       }
       // Find the position of the instruction we're storing into, filtering out this
       // store and all other stores to that instruction.
-      position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter */ true);
+      position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter= */ true);
 
       // The position needs to be dominated by the store, in order for the store to move there.
       if (position == nullptr || !instruction->GetBlock()->Dominates(position->GetBlock())) {
@@ -434,7 +434,7 @@
       continue;
     }
     MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSunk);
-    instruction->MoveBefore(position, /* ensure_safety */ false);
+    instruction->MoveBefore(position, /* do_checks= */ false);
   }
 }
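Beyond adding the `=`, the hunks above also correct the commented names to the parameters as actually declared (`block`, `do_checks`); stale names are exactly what the checked style catches. The `CommonDominator finder(/* block= */ nullptr)` line seeds a running common-dominator computation over all use sites; a rough sketch of that idea, assuming explicit idom pointers and depths (the real ART helper differs in detail):

    #include <cassert>

    // Blocks form a dominator tree: idom points at the immediate dominator,
    // depth is the distance from the entry block.
    struct Block {
      Block* idom;
      int depth;
    };

    // Walk the deeper chain upward until the two nodes meet.
    Block* CommonDominator(Block* a, Block* b) {
      if (a == nullptr) return b;  // seeding with nullptr adopts the first block
      if (b == nullptr) return a;
      while (a != b) {
        if (a->depth < b->depth) {
          b = b->idom;
        } else {
          a = a->idom;
        }
      }
      return a;
    }

    int main() {
      Block entry{nullptr, 0};
      Block left{&entry, 1};
      Block right{&entry, 1};
      assert(CommonDominator(&left, &right) == &entry);
      assert(CommonDominator(nullptr, &left) == &left);  // the seeded case
      return 0;
    }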
 
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index b1436f8..74d9d3a 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -70,7 +70,7 @@
 
     check_after_cf(graph_);
 
-    HDeadCodeElimination(graph_, nullptr /* stats */, "dead_code_elimination").Run();
+    HDeadCodeElimination(graph_, /* stats= */ nullptr, "dead_code_elimination").Run();
     GraphChecker graph_checker_dce(graph_);
     graph_checker_dce.Run();
     ASSERT_TRUE(graph_checker_dce.IsValid());
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 2774535..f5cd4dc 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -43,7 +43,7 @@
   std::string actual_before = printer_before.str();
   ASSERT_EQ(actual_before, expected_before);
 
-  HDeadCodeElimination(graph, nullptr /* stats */, "dead_code_elimination").Run();
+  HDeadCodeElimination(graph, /* stats= */ nullptr, "dead_code_elimination").Run();
   GraphChecker graph_checker(graph);
   graph_checker.Run();
   ASSERT_TRUE(graph_checker.IsValid());
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index a689f35..01d9603 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -635,8 +635,8 @@
       }
     }
     CheckTypeCheckBitstringInput(
-        check, /* input_pos */ 2, check_values, expected_path_to_root, "path_to_root");
-    CheckTypeCheckBitstringInput(check, /* input_pos */ 3, check_values, expected_mask, "mask");
+        check, /* input_pos= */ 2, check_values, expected_path_to_root, "path_to_root");
+    CheckTypeCheckBitstringInput(check, /* input_pos= */ 3, check_values, expected_mask, "mask");
   } else {
     if (!input->IsLoadClass()) {
       AddError(StringPrintf("%s:%d (classic) expects a HLoadClass as second input, not %s:%d.",
@@ -931,7 +931,7 @@
           // because the BitVector reallocation strategy has very bad worst-case behavior.
           ArenaBitVector visited(&allocator,
                                  GetGraph()->GetCurrentInstructionId(),
-                                 /* expandable */ false,
+                                 /* expandable= */ false,
                                  kArenaAllocGraphChecker);
           visited.ClearAllBits();
           if (!IsConstantEquivalent(phi, other_phi, &visited)) {
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index a1af2be..0796620 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -130,10 +130,10 @@
     // been generated, so we can read data in literal pools.
     disassembler_ = std::unique_ptr<Disassembler>((*create_disassembler)(
             instruction_set,
-            new DisassemblerOptions(/* absolute_addresses */ false,
+            new DisassemblerOptions(/* absolute_addresses= */ false,
                                     base_address,
                                     end_address,
-                                    /* can_read_literals */ true,
+                                    /* can_read_literals= */ true,
                                     Is64BitInstructionSet(instruction_set)
                                         ? &Thread::DumpThreadOffset<PointerSize::k64>
                                         : &Thread::DumpThreadOffset<PointerSize::k32>)));
@@ -924,8 +924,8 @@
     HGraphVisualizerPrinter printer(graph_,
                                     *output_,
                                     "disassembly",
-                                    /* is_after_pass */ true,
-                                    /* graph_in_bad_state */ false,
+                                    /* is_after_pass= */ true,
+                                    /* graph_in_bad_state= */ false,
                                     codegen_,
                                     codegen_.GetDisassemblyInformation());
     printer.Run();
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index e6b6326..3689d1d 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -348,7 +348,7 @@
         side_effects_(side_effects),
         sets_(graph->GetBlocks().size(), nullptr, allocator_.Adapter(kArenaAllocGvn)),
         visited_blocks_(
-            &allocator_, graph->GetBlocks().size(), /* expandable */ false, kArenaAllocGvn) {
+            &allocator_, graph->GetBlocks().size(), /* expandable= */ false, kArenaAllocGvn) {
     visited_blocks_.ClearAllBits();
   }
 
@@ -546,12 +546,12 @@
     // that is larger, we return it if no perfectly-matching set is found.
     // Note that we defer testing WillBeReferencedAgain until all other criteria
     // have been satisfied because it might be expensive.
-    if (current_set->CanHoldCopyOf(reference_set, /* exact_match */ true)) {
+    if (current_set->CanHoldCopyOf(reference_set, /* exact_match= */ true)) {
       if (!WillBeReferencedAgain(current_block)) {
         return current_block;
       }
     } else if (secondary_match == nullptr &&
-               current_set->CanHoldCopyOf(reference_set, /* exact_match */ false)) {
+               current_set->CanHoldCopyOf(reference_set, /* exact_match= */ false)) {
       if (!WillBeReferencedAgain(current_block)) {
         secondary_match = current_block;
       }
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 55eca23..4c78fa8 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -216,13 +216,13 @@
   chase_hint_ = chase_hint;
   bool in_body = context->GetBlock() != loop->GetHeader();
   int64_t stride_value = 0;
-  *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true));
-  *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false), chase_hint);
+  *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min= */ true));
+  *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min= */ false), chase_hint);
   *needs_finite_test = NeedsTripCount(info, &stride_value) && IsUnsafeTripCount(trip);
   chase_hint_ = nullptr;
   // Retry chasing constants for wrap-around (merge sensitive).
   if (!min_val->is_known && info->induction_class == HInductionVarAnalysis::kWrapAround) {
-    *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true));
+    *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min= */ true));
   }
   return true;
 }
@@ -445,8 +445,8 @@
     }
     // Try range analysis on the invariant, only accept a proper range
     // to avoid arithmetic wrap-around anomalies.
-    Value min_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ true);
-    Value max_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ false);
+    Value min_val = GetVal(info, nullptr, /* in_body= */ true, /* is_min= */ true);
+    Value max_val = GetVal(info, nullptr, /* in_body= */ true, /* is_min= */ false);
     if (IsConstantValue(min_val) &&
         IsConstantValue(max_val) && min_val.b_constant <= max_val.b_constant) {
       if ((request == kExact && min_val.b_constant == max_val.b_constant) || request == kAtMost) {
@@ -791,10 +791,10 @@
     return MulRangeAndConstant(value, info1, trip, in_body, is_min);
   }
   // Interval ranges.
-  Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true);
-  Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
-  Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
-  Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
+  Value v1_min = GetVal(info1, trip, in_body, /* is_min= */ true);
+  Value v1_max = GetVal(info1, trip, in_body, /* is_min= */ false);
+  Value v2_min = GetVal(info2, trip, in_body, /* is_min= */ true);
+  Value v2_max = GetVal(info2, trip, in_body, /* is_min= */ false);
   // Positive range vs. positive or negative range.
   if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) {
     if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
@@ -825,10 +825,10 @@
     return DivRangeAndConstant(value, info1, trip, in_body, is_min);
   }
   // Interval ranges.
-  Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true);
-  Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
-  Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
-  Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
+  Value v1_min = GetVal(info1, trip, in_body, /* is_min= */ true);
+  Value v1_max = GetVal(info1, trip, in_body, /* is_min= */ false);
+  Value v2_min = GetVal(info2, trip, in_body, /* is_min= */ true);
+  Value v2_max = GetVal(info2, trip, in_body, /* is_min= */ false);
   // Positive range vs. positive or negative range.
   if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) {
     if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
@@ -1019,10 +1019,10 @@
   // Code generation for taken test: generate the code when requested or otherwise analyze
   // if code generation is feasible when taken test is needed.
   if (taken_test != nullptr) {
-    return GenerateCode(trip->op_b, nullptr, graph, block, taken_test, in_body, /* is_min */ false);
+    return GenerateCode(trip->op_b, nullptr, graph, block, taken_test, in_body, /* is_min= */ false);
   } else if (*needs_taken_test) {
     if (!GenerateCode(
-        trip->op_b, nullptr, nullptr, nullptr, nullptr, in_body, /* is_min */ false)) {
+        trip->op_b, nullptr, nullptr, nullptr, nullptr, in_body, /* is_min= */ false)) {
       return false;
     }
   }
@@ -1030,9 +1030,9 @@
   return
       // Success on lower if invariant (not set), or code can be generated.
       ((info->induction_class == HInductionVarAnalysis::kInvariant) ||
-          GenerateCode(info, trip, graph, block, lower, in_body, /* is_min */ true)) &&
+          GenerateCode(info, trip, graph, block, lower, in_body, /* is_min= */ true)) &&
       // And success on upper.
-      GenerateCode(info, trip, graph, block, upper, in_body, /* is_min */ false);
+      GenerateCode(info, trip, graph, block, upper, in_body, /* is_min= */ false);
 }
 
 bool InductionVarRange::GenerateLastValuePolynomial(HInductionVarAnalysis::InductionInfo* info,
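The GetMul/GetDiv hunks above sit inside interval arithmetic: once both operands have known constant bounds, the result range is taken from endpoint products, and only the cases with a known sign are handled. A sketch of the multiplication cases under those assumptions (treating mixed-sign ranges as unknown is a simplification of what the compiler does):

    #include <cassert>
    #include <cstdint>

    struct Range {
      int64_t min;
      int64_t max;
    };

    // Multiply a known non-negative range [a, b] (a >= 0) by [c, d].
    // With the signs pinned down, the extremes occur at the endpoints.
    Range MulNonNegativeBy(Range pos, Range other) {
      assert(pos.min >= 0);
      if (other.min >= 0) {
        return {pos.min * other.min, pos.max * other.max};  // both non-negative
      }
      if (other.max <= 0) {
        return {pos.max * other.min, pos.min * other.max};  // other non-positive
      }
      // Mixed signs: a full implementation would compare all four endpoint
      // products; the range analysis simply gives up on cases it cannot pin down.
      return {INT64_MIN, INT64_MAX};
    }

    int main() {
      Range r = MulNonNegativeBy({2, 3}, {-5, -4});
      assert(r.min == -15 && r.max == -8);
      return 0;
    }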
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index 223e08e..f6af384 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -252,24 +252,24 @@
 
   Value GetMin(HInductionVarAnalysis::InductionInfo* info,
                HInductionVarAnalysis::InductionInfo* trip) {
-    return range_.GetVal(info, trip, /* in_body */ true, /* is_min */ true);
+    return range_.GetVal(info, trip, /* in_body= */ true, /* is_min= */ true);
   }
 
   Value GetMax(HInductionVarAnalysis::InductionInfo* info,
                HInductionVarAnalysis::InductionInfo* trip) {
-    return range_.GetVal(info, trip, /* in_body */ true, /* is_min */ false);
+    return range_.GetVal(info, trip, /* in_body= */ true, /* is_min= */ false);
   }
 
   Value GetMul(HInductionVarAnalysis::InductionInfo* info1,
                HInductionVarAnalysis::InductionInfo* info2,
                bool is_min) {
-    return range_.GetMul(info1, info2, nullptr, /* in_body */ true, is_min);
+    return range_.GetMul(info1, info2, nullptr, /* in_body= */ true, is_min);
   }
 
   Value GetDiv(HInductionVarAnalysis::InductionInfo* info1,
                HInductionVarAnalysis::InductionInfo* info2,
                bool is_min) {
-    return range_.GetDiv(info1, info2, nullptr, /* in_body */ true, is_min);
+    return range_.GetDiv(info1, info2, nullptr, /* in_body= */ true, is_min);
   }
 
   Value GetRem(HInductionVarAnalysis::InductionInfo* info1,
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 417d794..854228b 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -175,7 +175,7 @@
         if (honor_noinline_directives) {
           // Debugging case: directives in method names control or assert on inlining.
           std::string callee_name = outer_compilation_unit_.GetDexFile()->PrettyMethod(
-              call->GetDexMethodIndex(), /* with_signature */ false);
+              call->GetDexMethodIndex(), /* with_signature= */ false);
           // Tests prevent inlining by having $noinline$ in their method names.
           if (callee_name.find("$noinline$") == std::string::npos) {
             if (TryInline(call)) {
@@ -504,7 +504,7 @@
     bool result = TryInlineAndReplace(invoke_instruction,
                                       actual_method,
                                       ReferenceTypeInfo::CreateInvalid(),
-                                      /* do_rtp */ true,
+                                      /* do_rtp= */ true,
                                       cha_devirtualize);
     if (result) {
       // Successfully inlined.
@@ -858,9 +858,9 @@
   HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
   if (!TryInlineAndReplace(invoke_instruction,
                            resolved_method,
-                           ReferenceTypeInfo::Create(monomorphic_type, /* is_exact */ true),
-                           /* do_rtp */ false,
-                           /* cha_devirtualize */ false)) {
+                           ReferenceTypeInfo::Create(monomorphic_type, /* is_exact= */ true),
+                           /* do_rtp= */ false,
+                           /* cha_devirtualize= */ false)) {
     return false;
   }
 
@@ -871,7 +871,7 @@
                class_index,
                monomorphic_type,
                invoke_instruction,
-               /* with_deoptimization */ true);
+               /* with_deoptimization= */ true);
 
   // Run type propagation to get the guard typed, and eventually propagate the
   // type of the receiver.
@@ -879,7 +879,7 @@
                                      outer_compilation_unit_.GetClassLoader(),
                                      outer_compilation_unit_.GetDexCache(),
                                      handles_,
-                                     /* is_first_run */ false);
+                                     /* is_first_run= */ false);
   rtp_fixup.Run();
 
   MaybeRecordStat(stats_, MethodCompilationStat::kInlinedMonomorphicCall);
@@ -949,7 +949,7 @@
                                                                    klass,
                                                                    is_referrer,
                                                                    invoke_instruction->GetDexPc(),
-                                                                   /* needs_access_check */ false);
+                                                                   /* needs_access_check= */ false);
   HLoadClass::LoadKind kind = HSharpening::ComputeLoadClassKind(
       load_class, codegen_, caller_compilation_unit_);
   DCHECK(kind != HLoadClass::LoadKind::kInvalid)
@@ -1027,7 +1027,7 @@
     if (!class_index.IsValid() ||
         !TryBuildAndInline(invoke_instruction,
                            method,
-                           ReferenceTypeInfo::Create(handle, /* is_exact */ true),
+                           ReferenceTypeInfo::Create(handle, /* is_exact= */ true),
                            &return_replacement)) {
       all_targets_inlined = false;
     } else {
@@ -1079,7 +1079,7 @@
                                      outer_compilation_unit_.GetClassLoader(),
                                      outer_compilation_unit_.GetDexCache(),
                                      handles_,
-                                     /* is_first_run */ false);
+                                     /* is_first_run= */ false);
   rtp_fixup.Run();
   return true;
 }
@@ -1150,14 +1150,14 @@
 
 
   graph_->UpdateLoopAndTryInformationOfNewBlock(
-      then, original_invoke_block, /* replace_if_back_edge */ false);
+      then, original_invoke_block, /* replace_if_back_edge= */ false);
   graph_->UpdateLoopAndTryInformationOfNewBlock(
-      otherwise, original_invoke_block, /* replace_if_back_edge */ false);
+      otherwise, original_invoke_block, /* replace_if_back_edge= */ false);
 
   // In case the original invoke location was a back edge, we need to update
   // the loop to now have the merge block as a back edge.
   graph_->UpdateLoopAndTryInformationOfNewBlock(
-      merge, original_invoke_block, /* replace_if_back_edge */ true);
+      merge, original_invoke_block, /* replace_if_back_edge= */ true);
 }
 
 bool HInliner::TryInlinePolymorphicCallToSameTarget(
@@ -1275,7 +1275,7 @@
                                      outer_compilation_unit_.GetClassLoader(),
                                      outer_compilation_unit_.GetDexCache(),
                                      handles_,
-                                     /* is_first_run */ false);
+                                     /* is_first_run= */ false);
   rtp_fixup.Run();
 
   MaybeRecordStat(stats_, MethodCompilationStat::kInlinedPolymorphicCall);
@@ -1399,7 +1399,7 @@
                              outer_compilation_unit_.GetClassLoader(),
                              outer_compilation_unit_.GetDexCache(),
                              handles_,
-                             /* is_first_run */ false).Run();
+                             /* is_first_run= */ false).Run();
   }
   return true;
 }
@@ -1625,7 +1625,8 @@
                                  [](uint16_t index) { return index != DexFile::kDexNoIndex16; }));
 
       // Create HInstanceFieldSet for each IPUT that stores non-zero data.
-      HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, /* this */ 0u);
+      HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction,
+                                                        /* arg_vreg_index= */ 0u);
       bool needs_constructor_barrier = false;
       for (size_t i = 0; i != number_of_iputs; ++i) {
         HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, iput_args[i]);
@@ -1667,7 +1668,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   ArtField* resolved_field =
-      class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
+      class_linker->LookupResolvedField(field_index, referrer, /* is_static= */ false);
   DCHECK(resolved_field != nullptr);
   HInstanceFieldGet* iget = new (graph_->GetAllocator()) HInstanceFieldGet(
       obj,
@@ -1680,7 +1681,7 @@
       *referrer->GetDexFile(),
       // Read barrier generates a runtime call in slow path and we need a valid
       // dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
-      /* dex_pc */ 0);
+      /* dex_pc= */ 0);
   if (iget->GetType() == DataType::Type::kReference) {
     // Use the same dex_cache that we used for field lookup as the hint_dex_cache.
     Handle<mirror::DexCache> dex_cache = handles_->NewHandle(referrer->GetDexCache());
@@ -1688,7 +1689,7 @@
                                  outer_compilation_unit_.GetClassLoader(),
                                  dex_cache,
                                  handles_,
-                                 /* is_first_run */ false);
+                                 /* is_first_run= */ false);
     rtp.Visit(iget);
   }
   return iget;
@@ -1702,7 +1703,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   ArtField* resolved_field =
-      class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
+      class_linker->LookupResolvedField(field_index, referrer, /* is_static= */ false);
   DCHECK(resolved_field != nullptr);
   if (is_final != nullptr) {
     // This information is needed only for constructors.
@@ -1721,7 +1722,7 @@
       *referrer->GetDexFile(),
       // Read barrier generates a runtime call in slow path and we need a valid
       // dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
-      /* dex_pc */ 0);
+      /* dex_pc= */ 0);
   return iput;
 }
 
@@ -1777,7 +1778,7 @@
       resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
       method_index,
       resolved_method->GetAccessFlags(),
-      /* verified_method */ nullptr,
+      /* verified_method= */ nullptr,
       dex_cache,
       compiling_class);
 
@@ -1797,7 +1798,7 @@
       codegen_->GetCompilerOptions().GetInstructionSet(),
       invoke_type,
       graph_->IsDebuggable(),
-      /* osr */ false,
+      /* osr= */ false,
       caller_instruction_counter);
   callee_graph->SetArtMethod(resolved_method);
 
@@ -1878,7 +1879,7 @@
                              outer_compilation_unit_.GetClassLoader(),
                              dex_compilation_unit.GetDexCache(),
                              handles_,
-                             /* is_first_run */ false).Run();
+                             /* is_first_run= */ false).Run();
   }
 
   RunOptimizations(callee_graph, code_item, dex_compilation_unit);
@@ -2102,7 +2103,7 @@
   // is more specific than the class which declares the method.
   if (!resolved_method->IsStatic()) {
     if (IsReferenceTypeRefinement(GetClassRTI(resolved_method->GetDeclaringClass()),
-                                  /* declared_can_be_null */ false,
+                                  /* declared_can_be_null= */ false,
                                   invoke_instruction->InputAt(0u))) {
       return true;
     }
@@ -2122,7 +2123,7 @@
       ObjPtr<mirror::Class> param_cls = resolved_method->LookupResolvedClassFromTypeIndex(
           param_list->GetTypeItem(param_idx).type_idx_);
       if (IsReferenceTypeRefinement(GetClassRTI(param_cls),
-                                    /* declared_can_be_null */ true,
+                                    /* declared_can_be_null= */ true,
                                     input)) {
         return true;
       }
@@ -2139,7 +2140,7 @@
     if (return_replacement->GetType() == DataType::Type::kReference) {
       // Test if the return type is a refinement of the declared return type.
       if (IsReferenceTypeRefinement(invoke_instruction->GetReferenceTypeInfo(),
-                                    /* declared_can_be_null */ true,
+                                    /* declared_can_be_null= */ true,
                                     return_replacement)) {
         return true;
       } else if (return_replacement->IsInstanceFieldGet()) {
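For context on the monomorphic path these hunks touch: `/* with_deoptimization= */ true` means the inlined body is guarded by a class check that deoptimizes on mismatch instead of falling back to a virtual call. A hand-written C++ analogue of that guard shape, with all names invented for illustration:

    #include <cstdio>

    struct Class {};
    struct Object { const Class* klass; };

    Class g_profiled_class;  // the single class seen by the inline cache

    int VirtualDispatch(const Object*) { return -1; }  // stand-in slow path
    void Deoptimize() { std::puts("deopt: receiver class changed"); }

    // Guarded inline of a monomorphic call site: compare the receiver's class
    // against the profiled class, then run the inlined body. With
    // with_deoptimization=true the mismatch arm deoptimizes instead of
    // dispatching virtually.
    int GuardedInlinedCall(const Object* receiver, bool with_deoptimization) {
      if (receiver->klass == &g_profiled_class) {
        return 42;  // inlined body stand-in
      }
      if (with_deoptimization) {
        Deoptimize();
        return 0;
      }
      return VirtualDispatch(receiver);
    }

    int main() {
      Object o{&g_profiled_class};
      std::printf("%d\n", GuardedInlinedCall(&o, /* with_deoptimization= */ true));
      return 0;
    }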
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 448fed9..b6ef2b6 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -434,7 +434,7 @@
   HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
       HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall,
       HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
-      /* method_load_data */ 0u
+      /* method_load_data= */ 0u
   };
   InvokeType invoke_type = dex_compilation_unit_->IsStatic() ? kStatic : kDirect;
   HInvokeStaticOrDirect* invoke = new (allocator_) HInvokeStaticOrDirect(
@@ -449,7 +449,7 @@
       target_method,
       HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
   RangeInstructionOperands operands(graph_->GetNumberOfVRegs() - in_vregs, in_vregs);
-  HandleInvoke(invoke, operands, dex_file_->GetMethodShorty(method_idx), /* is_unresolved */ false);
+  HandleInvoke(invoke, operands, dex_file_->GetMethodShorty(method_idx), /* is_unresolved= */ false);
 
   // Add the return instruction.
   if (return_type_ == DataType::Type::kVoid) {
@@ -468,7 +468,7 @@
 ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
   ArenaBitVector* locations = ArenaBitVector::Create(local_allocator_,
                                                      code_item_accessor_.InsnsSizeInCodeUnits(),
-                                                     /* expandable */ false,
+                                                     /* expandable= */ false,
                                                      kArenaAllocGraphBuilder);
   locations->ClearAllBits();
   // The visitor gets called when the line number changes.
@@ -567,7 +567,7 @@
                                                               referrer_method_id.class_idx_,
                                                               parameter_index++,
                                                               DataType::Type::kReference,
-                                                              /* is_this */ true);
+                                                              /* is_this= */ true);
     AppendInstruction(parameter);
     UpdateLocal(locals_index++, parameter);
     number_of_parameters--;
@@ -584,7 +584,7 @@
         arg_types->GetTypeItem(shorty_pos - 1).type_idx_,
         parameter_index++,
         DataType::FromShorty(shorty[shorty_pos]),
-        /* is_this */ false);
+        /* is_this= */ false);
     ++shorty_pos;
     AppendInstruction(parameter);
     // Store the parameter value in the local that the dex code will use
@@ -926,7 +926,7 @@
                                                          dex_pc,
                                                          method_idx,
                                                          invoke_type);
-    return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ true);
+    return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ true);
   }
 
   // Replace calls to String.<init> with StringFactory.
@@ -945,10 +945,10 @@
     HInvoke* invoke = new (allocator_) HInvokeStaticOrDirect(
         allocator_,
         number_of_arguments - 1,
-        DataType::Type::kReference /*return_type */,
+        /* return_type= */ DataType::Type::kReference,
         dex_pc,
         method_idx,
-        nullptr /* resolved_method */,
+        /* resolved_method= */ nullptr,
         dispatch_info,
         invoke_type,
         target_method,
@@ -1010,7 +1010,7 @@
                                                resolved_method,
                                                ImTable::GetImtIndex(resolved_method));
   }
-  return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false, clinit_check);
+  return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false, clinit_check);
 }
 
 bool HInstructionBuilder::BuildInvokePolymorphic(uint32_t dex_pc,
@@ -1026,7 +1026,7 @@
                                                         return_type,
                                                         dex_pc,
                                                         method_idx);
-  return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false);
+  return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false);
 }
 
 
@@ -1042,7 +1042,7 @@
                                                    call_site_idx,
                                                    return_type,
                                                    dex_pc);
-  return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false);
+  return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false);
 }
 
 HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) {
@@ -1370,7 +1370,7 @@
                                      klass->GetDexFile(),
                                      klass,
                                      dex_pc,
-                                     /* needs_access_check */ false);
+                                     /* needs_access_check= */ false);
     if (cls != nullptr) {
       *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
       clinit_check = new (allocator_) HClinitCheck(cls, dex_pc);
@@ -1539,7 +1539,7 @@
   }
 
   ScopedObjectAccess soa(Thread::Current());
-  ArtField* resolved_field = ResolveField(field_index, /* is_static */ false, is_put);
+  ArtField* resolved_field = ResolveField(field_index, /* is_static= */ false, is_put);
 
   // Generate an explicit null check on the reference, unless the field access
   // is unresolved. In that case, we rely on the runtime to perform various
@@ -1673,7 +1673,7 @@
   uint16_t field_index = instruction.VRegB_21c();
 
   ScopedObjectAccess soa(Thread::Current());
-  ArtField* resolved_field = ResolveField(field_index, /* is_static */ true, is_put);
+  ArtField* resolved_field = ResolveField(field_index, /* is_static= */ true, is_put);
 
   if (resolved_field == nullptr) {
     MaybeRecordStat(compilation_stats_,
@@ -1690,7 +1690,7 @@
                                         klass->GetDexFile(),
                                         klass,
                                         dex_pc,
-                                        /* needs_access_check */ false);
+                                        /* needs_access_check= */ false);
 
   if (constant == nullptr) {
     // The class cannot be referenced from this compiled code. Generate
@@ -2946,7 +2946,7 @@
     case Instruction::IGET_CHAR_QUICK:
     case Instruction::IGET_SHORT:
     case Instruction::IGET_SHORT_QUICK: {
-      if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put */ false, quicken_index)) {
+      if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put= */ false, quicken_index)) {
         return false;
       }
       break;
@@ -2966,7 +2966,7 @@
     case Instruction::IPUT_CHAR_QUICK:
     case Instruction::IPUT_SHORT:
     case Instruction::IPUT_SHORT_QUICK: {
-      if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put */ true, quicken_index)) {
+      if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put= */ true, quicken_index)) {
         return false;
       }
       break;
@@ -2979,7 +2979,7 @@
     case Instruction::SGET_BYTE:
     case Instruction::SGET_CHAR:
     case Instruction::SGET_SHORT: {
-      BuildStaticFieldAccess(instruction, dex_pc, /* is_put */ false);
+      BuildStaticFieldAccess(instruction, dex_pc, /* is_put= */ false);
       break;
     }
 
@@ -2990,7 +2990,7 @@
     case Instruction::SPUT_BYTE:
     case Instruction::SPUT_CHAR:
     case Instruction::SPUT_SHORT: {
-      BuildStaticFieldAccess(instruction, dex_pc, /* is_put */ true);
+      BuildStaticFieldAccess(instruction, dex_pc, /* is_put= */ true);
       break;
     }
 
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 4c6d6ba..a433d7e 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -372,7 +372,7 @@
       // (as defined by shift semantics). This ensures other
       // optimizations do not need to special case for such situations.
       DCHECK_EQ(shift_amount->GetType(), DataType::Type::kInt32);
-      instruction->ReplaceInput(GetGraph()->GetIntConstant(masked_cst), /* index */ 1);
+      instruction->ReplaceInput(GetGraph()->GetIntConstant(masked_cst), /* index= */ 1);
       RecordSimplification();
       return;
     }
@@ -2361,17 +2361,17 @@
   ArenaAllocator* allocator = GetGraph()->GetAllocator();
   // We treat String as an array to allow DCE and BCE to seamlessly work on strings,
   // so create the HArrayLength, HBoundsCheck and HArrayGet.
-  HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length */ true);
+  HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length= */ true);
   invoke->GetBlock()->InsertInstructionBefore(length, invoke);
   HBoundsCheck* bounds_check = new (allocator) HBoundsCheck(
-      index, length, dex_pc, /* is_string_char_at */ true);
+      index, length, dex_pc, /* is_string_char_at= */ true);
   invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke);
   HArrayGet* array_get = new (allocator) HArrayGet(str,
                                                    bounds_check,
                                                    DataType::Type::kUint16,
                                                    SideEffects::None(),  // Strings are immutable.
                                                    dex_pc,
-                                                   /* is_string_char_at */ true);
+                                                   /* is_string_char_at= */ true);
   invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get);
   bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment());
   GetGraph()->SetHasBoundsChecks(true);
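The hunk above is the String.charAt expansion the comment describes: the call is lowered to the same length/bounds-check/element-load trio used for arrays, so bounds-check elimination and DCE handle strings with no extra logic. Spelled out as plain C++ (using assert in place of the thrown StringIndexOutOfBoundsException):

    #include <cassert>
    #include <cstdint>
    #include <string>

    // charAt as the simplifier rewrites it: an explicit length load, a bounds
    // check, then a 16-bit element load; three operations the array-oriented
    // passes already understand.
    uint16_t CharAt(const std::u16string& str, int32_t index) {
      int32_t length = static_cast<int32_t>(str.size());  // HArrayLength(is_string_length=true)
      assert(0 <= index && index < length);               // HBoundsCheck(is_string_char_at=true)
      return str[static_cast<size_t>(index)];             // HArrayGet(kUint16, is_string_char_at=true)
    }

    int main() {
      std::u16string s = u"art";
      return CharAt(s, 1) == u'r' ? 0 : 1;
    }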
@@ -2383,7 +2383,7 @@
   // We treat String as an array to allow DCE and BCE to seamlessly work on strings,
   // so create the HArrayLength.
   HArrayLength* length =
-      new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length */ true);
+      new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length= */ true);
   HInstruction* replacement;
   if (invoke->GetIntrinsic() == Intrinsics::kStringIsEmpty) {
     // For String.isEmpty(), create the `HEqual` representing the `length == 0`.
@@ -2534,28 +2534,28 @@
       SimplifySystemArrayCopy(instruction);
       break;
     case Intrinsics::kIntegerRotateRight:
-      SimplifyRotate(instruction, /* is_left */ false, DataType::Type::kInt32);
+      SimplifyRotate(instruction, /* is_left= */ false, DataType::Type::kInt32);
       break;
     case Intrinsics::kLongRotateRight:
-      SimplifyRotate(instruction, /* is_left */ false, DataType::Type::kInt64);
+      SimplifyRotate(instruction, /* is_left= */ false, DataType::Type::kInt64);
       break;
     case Intrinsics::kIntegerRotateLeft:
-      SimplifyRotate(instruction, /* is_left */ true, DataType::Type::kInt32);
+      SimplifyRotate(instruction, /* is_left= */ true, DataType::Type::kInt32);
       break;
     case Intrinsics::kLongRotateLeft:
-      SimplifyRotate(instruction, /* is_left */ true, DataType::Type::kInt64);
+      SimplifyRotate(instruction, /* is_left= */ true, DataType::Type::kInt64);
       break;
     case Intrinsics::kIntegerCompare:
-      SimplifyCompare(instruction, /* is_signum */ false, DataType::Type::kInt32);
+      SimplifyCompare(instruction, /* is_signum= */ false, DataType::Type::kInt32);
       break;
     case Intrinsics::kLongCompare:
-      SimplifyCompare(instruction, /* is_signum */ false, DataType::Type::kInt64);
+      SimplifyCompare(instruction, /* is_signum= */ false, DataType::Type::kInt64);
       break;
     case Intrinsics::kIntegerSignum:
-      SimplifyCompare(instruction, /* is_signum */ true, DataType::Type::kInt32);
+      SimplifyCompare(instruction, /* is_signum= */ true, DataType::Type::kInt32);
       break;
     case Intrinsics::kLongSignum:
-      SimplifyCompare(instruction, /* is_signum */ true, DataType::Type::kInt64);
+      SimplifyCompare(instruction, /* is_signum= */ true, DataType::Type::kInt64);
       break;
     case Intrinsics::kFloatIsNaN:
     case Intrinsics::kDoubleIsNaN:
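The rotate and compare/signum intrinsics above funnel into two helpers parameterized by `is_left` and `is_signum`, and the underlying identities are small enough to state directly. A standalone sketch, assuming the usual canonicalization on a rotate-right operation with signum(x) treated as compare(x, 0):

    #include <cassert>
    #include <cstdint>

    // Rotate right; shift distances are masked to the type width, matching
    // Java/DEX shift semantics.
    uint32_t RotateRight(uint32_t value, uint32_t distance) {
      distance &= 31;
      return (value >> distance) | (value << ((32 - distance) & 31));
    }

    // is_left=true reduces to rotate-right by the negated distance.
    uint32_t RotateLeft(uint32_t value, uint32_t distance) {
      return RotateRight(value, (32 - (distance & 31)) & 31);
    }

    // is_signum=true: signum(x) is just compare(x, 0).
    int32_t Compare(int32_t a, int32_t b) { return (a > b) - (a < b); }
    int32_t Signum(int32_t x) { return Compare(x, 0); }

    int main() {
      assert(RotateRight(0x80000001u, 1) == 0xC0000000u);
      assert(RotateLeft(0x80000001u, 1) == 0x00000003u);
      assert(Signum(-7) == -1 && Signum(0) == 0 && Signum(7) == 1);
      return 0;
    }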
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index f968c19..01e9cff 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -43,11 +43,11 @@
   bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
   bool TryMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op, bool do_merge);
   bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
-    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
+    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ false);
   }
   bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
     DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
-    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
+    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ true);
   }
 
   /**
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index b536cb4..e23decb 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -45,11 +45,11 @@
                                   HInstruction* bitfield_op,
                                   bool do_merge);
   bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
-    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
+    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ false);
   }
   bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
     DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
-    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
+    return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ true);
   }
 
   /**
diff --git a/compiler/optimizing/intrinsic_objects.cc b/compiler/optimizing/intrinsic_objects.cc
index 0374b4e..c345624 100644
--- a/compiler/optimizing/intrinsic_objects.cc
+++ b/compiler/optimizing/intrinsic_objects.cc
@@ -30,7 +30,7 @@
                                                                       ClassLinker* class_linker)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ObjPtr<mirror::Class> integer_cache_class = class_linker->LookupClass(
-      self, "Ljava/lang/Integer$IntegerCache;", /* class_linker */ nullptr);
+      self, "Ljava/lang/Integer$IntegerCache;", /* class_loader= */ nullptr);
   if (integer_cache_class == nullptr || !integer_cache_class->IsInitialized()) {
     return nullptr;
   }
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 619cd8e..2721cb5 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -76,7 +76,7 @@
                                                     const char* descriptor)
         REQUIRES_SHARED(Locks::mutator_lock_) {
   ObjPtr<mirror::Class> klass =
-      class_linker->LookupClass(self, descriptor, /* class_loader */ nullptr);
+      class_linker->LookupClass(self, descriptor, /* class_loader= */ nullptr);
   DCHECK(klass != nullptr);
   DCHECK(klass->IsInitialized());
   return klass;
@@ -166,14 +166,14 @@
     Thread* self = Thread::Current();
     ScopedObjectAccess soa(self);
     ObjPtr<mirror::Class> cache_class = class_linker->LookupClass(
-        self, kIntegerCacheDescriptor, /* class_loader */ nullptr);
+        self, kIntegerCacheDescriptor, /* class_loader= */ nullptr);
     DCHECK(cache_class != nullptr);
     if (UNLIKELY(!cache_class->IsInitialized())) {
       LOG(WARNING) << "Image class " << cache_class->PrettyDescriptor() << " is uninitialized.";
       return;
     }
     ObjPtr<mirror::Class> integer_class =
-        class_linker->LookupClass(self, kIntegerDescriptor, /* class_loader */ nullptr);
+        class_linker->LookupClass(self, kIntegerDescriptor, /* class_loader= */ nullptr);
     DCHECK(integer_class != nullptr);
     if (UNLIKELY(!integer_class->IsInitialized())) {
       LOG(WARNING) << "Image class " << integer_class->PrettyDescriptor() << " is uninitialized.";
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 0b17c9d..ae1650e 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -272,10 +272,10 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
 }
 void IntrinsicCodeGeneratorARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
 }
 
 void IntrinsicLocationsBuilderARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -286,10 +286,10 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
 }
 void IntrinsicCodeGeneratorARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
 }
 
 static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -618,7 +618,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) {
-  GenMathRound(invoke, /* is_double */ true, GetVIXLAssembler());
+  GenMathRound(invoke, /* is_double= */ true, GetVIXLAssembler());
 }
 
 void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -626,7 +626,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) {
-  GenMathRound(invoke, /* is_double */ false, GetVIXLAssembler());
+  GenMathRound(invoke, /* is_double= */ false, GetVIXLAssembler());
 }
 
 void IntrinsicLocationsBuilderARM64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -752,13 +752,13 @@
                                                    trg_loc,
                                                    base,
                                                    MemOperand(temp.X()),
-                                                   /* needs_null_check */ false,
+                                                   /* needs_null_check= */ false,
                                                    is_volatile);
   } else {
     // Other cases.
     MemOperand mem_op(base.X(), offset);
     if (is_volatile) {
-      codegen->LoadAcquire(invoke, trg, mem_op, /* needs_null_check */ true);
+      codegen->LoadAcquire(invoke, trg, mem_op, /* needs_null_check= */ true);
     } else {
       codegen->Load(type, trg, mem_op);
     }
@@ -813,22 +813,22 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitUnsafeGet(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafeGetLong(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObject(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
 }
 
 static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -896,7 +896,7 @@
     }
 
     if (is_volatile || is_ordered) {
-      codegen->StoreRelease(invoke, type, source, mem_op, /* needs_null_check */ false);
+      codegen->StoreRelease(invoke, type, source, mem_op, /* needs_null_check= */ false);
     } else {
       codegen->Store(type, source, mem_op);
     }
@@ -911,64 +911,64 @@
 void IntrinsicCodeGeneratorARM64::VisitUnsafePut(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kInt32,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutObject(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kReference,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutLong(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke,
                DataType::Type::kInt64,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
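For reference, the `is_volatile`/`is_ordered` flags relabeled through this hunk select between plain and acquire/release accesses on ARM64. A rough rendering of that mapping with C++ atomics (the Java volatile contract is stronger than bare acquire/release, but the ldar/stlr pairing used here provides it on ARM64):

    #include <atomic>
    #include <cstdint>

    // Volatile Unsafe accesses lower to LoadAcquire (ldar) and StoreRelease
    // (stlr); putOrdered (is_ordered=true) also takes the StoreRelease path.
    int64_t UnsafeGetLongVolatile(const std::atomic<int64_t>& field) {
      return field.load(std::memory_order_acquire);   // LoadAcquire -> ldar
    }

    void UnsafePutLongVolatile(std::atomic<int64_t>& field, int64_t value) {
      field.store(value, std::memory_order_release);  // StoreRelease -> stlr
    }

    int main() {
      std::atomic<int64_t> field{0};
      UnsafePutLongVolatile(field, 42);
      return UnsafeGetLongVolatile(field) == 42 ? 0 : 1;
    }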
 
@@ -1638,7 +1638,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ true);
+  GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero= */ true);
 }
 
 void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1654,7 +1654,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ false);
+  GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero= */ false);
 }
 
 void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -2456,8 +2456,8 @@
                                                           src.W(),
                                                           class_offset,
                                                           temp3_loc,
-                                                          /* needs_null_check */ false,
-                                                          /* use_load_acquire */ false);
+                                                          /* needs_null_check= */ false,
+                                                          /* use_load_acquire= */ false);
           // Bail out if the source is not a non primitive array.
           // /* HeapReference<Class> */ temp1 = temp1->component_type_
           codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
@@ -2465,8 +2465,8 @@
                                                           temp1,
                                                           component_offset,
                                                           temp3_loc,
-                                                          /* needs_null_check */ false,
-                                                          /* use_load_acquire */ false);
+                                                          /* needs_null_check= */ false,
+                                                          /* use_load_acquire= */ false);
           __ Cbz(temp1, intrinsic_slow_path->GetEntryLabel());
           // If heap poisoning is enabled, `temp1` has been unpoisoned
           // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2482,8 +2482,8 @@
                                                         dest.W(),
                                                         class_offset,
                                                         temp3_loc,
-                                                        /* needs_null_check */ false,
-                                                        /* use_load_acquire */ false);
+                                                        /* needs_null_check= */ false,
+                                                        /* use_load_acquire= */ false);
 
         if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
           // Bail out if the destination is not a non primitive array.
@@ -2499,8 +2499,8 @@
                                                           temp1,
                                                           component_offset,
                                                           temp3_loc,
-                                                          /* needs_null_check */ false,
-                                                          /* use_load_acquire */ false);
+                                                          /* needs_null_check= */ false,
+                                                          /* use_load_acquire= */ false);
           __ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
           // If heap poisoning is enabled, `temp2` has been unpoisoned
           // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2518,8 +2518,8 @@
                                                         src.W(),
                                                         class_offset,
                                                         temp3_loc,
-                                                        /* needs_null_check */ false,
-                                                        /* use_load_acquire */ false);
+                                                        /* needs_null_check= */ false,
+                                                        /* use_load_acquire= */ false);
         // Note: if heap poisoning is on, we are comparing two unpoisoned references here.
         __ Cmp(temp1, temp2);
 
@@ -2532,8 +2532,8 @@
                                                           temp1,
                                                           component_offset,
                                                           temp3_loc,
-                                                          /* needs_null_check */ false,
-                                                          /* use_load_acquire */ false);
+                                                          /* needs_null_check= */ false,
+                                                          /* use_load_acquire= */ false);
           // /* HeapReference<Class> */ temp1 = temp1->super_class_
           // We do not need to emit a read barrier for the following
           // heap reference load, as `temp1` is only used in a
@@ -2616,16 +2616,16 @@
                                                         src.W(),
                                                         class_offset,
                                                         temp3_loc,
-                                                        /* needs_null_check */ false,
-                                                        /* use_load_acquire */ false);
+                                                        /* needs_null_check= */ false,
+                                                        /* use_load_acquire= */ false);
         // /* HeapReference<Class> */ temp2 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
                                                         temp2_loc,
                                                         temp1,
                                                         component_offset,
                                                         temp3_loc,
-                                                        /* needs_null_check */ false,
-                                                        /* use_load_acquire */ false);
+                                                        /* needs_null_check= */ false,
+                                                        /* use_load_acquire= */ false);
         __ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
         // If heap poisoning is enabled, `temp2` has been unpoisoned
        // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2779,7 +2779,7 @@
   }
 
   // We only need one card marking on the destination array.
-  codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null */ false);
+  codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null= */ false);
 
   __ Bind(intrinsic_slow_path->GetExitLabel());
 }
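
The recurring change in these hunks is the argument-comment style: `/* name */ value` becomes `/* name= */ value`. A minimal sketch of the convention (hypothetical declaration and caller, not taken from this patch) — with the trailing `=`, the comment claims to name the parameter, the form that tooling such as clang-tidy's bugprone-argument-comment check can verify against the callee's declaration:

    // Hypothetical callee, for illustration only.
    void GenerateLoad(bool needs_null_check, bool use_load_acquire);

    void Caller() {
      // The '=' suffix marks these as argument comments that tools can check
      // against the callee's parameter names.
      GenerateLoad(/* needs_null_check= */ false,
                   /* use_load_acquire= */ false);
    }
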
@@ -2812,7 +2812,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) {
-  GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+  GenIsInfinite(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
 }
 
 void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -2820,7 +2820,7 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
-  GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+  GenIsInfinite(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
 }
 
 void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) {
@@ -2954,58 +2954,20 @@
   __ Mvn(out, tmp);
 }
 
-// The threshold for sizes of arrays to use the library provided implementation
-// of CRC32.updateBytes instead of the intrinsic.
-static constexpr int32_t kCRC32UpdateBytesThreshold = 64 * 1024;
-
-void IntrinsicLocationsBuilderARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
-  if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
-    return;
-  }
-
-  LocationSummary* locations
-      = new (allocator_) LocationSummary(invoke,
-                                         LocationSummary::kCallOnSlowPath,
-                                         kIntrinsified);
-
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetInAt(2, Location::RegisterOrConstant(invoke->InputAt(2)));
-  locations->SetInAt(3, Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister());
-}
-
-// Lower the invoke of CRC32.updateBytes(int crc, byte[] b, int off, int len)
+// Generate code that uses CRC32 instructions to calculate
+// the CRC32 value of a sequence of bytes.
 //
-// Note: The intrinsic is not used if len exceeds a threshold.
-void IntrinsicCodeGeneratorARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
-  DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
-
-  auto masm = GetVIXLAssembler();
-  auto locations = invoke->GetLocations();
-
-  auto slow_path =
-    new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
-  codegen_->AddSlowPath(slow_path);
-
-  Register length = WRegisterFrom(locations->InAt(3));
-  __ Cmp(length, kCRC32UpdateBytesThreshold);
-  __ B(slow_path->GetEntryLabel(), hi);
-
-  const uint32_t array_data_offset =
-      mirror::Array::DataOffset(Primitive::kPrimByte).Uint32Value();
-  Register ptr = XRegisterFrom(locations->GetTemp(0));
-  Register array = XRegisterFrom(locations->InAt(1));
-  auto offset = locations->InAt(2);
-  if (offset.IsConstant()) {
-    int32_t offset_value = offset.GetConstant()->AsIntConstant()->GetValue();
-    __ Add(ptr, array, array_data_offset + offset_value);
-  } else {
-    __ Add(ptr, array, array_data_offset);
-    __ Add(ptr, ptr, XRegisterFrom(offset));
-  }
-
+// Parameters:
+//   masm   - VIXL macro assembler
+//   crc    - a register holding the initial CRC value
+//   ptr    - a register holding the memory address of the bytes
+//   length - a register holding the number of bytes to process
+//   out    - a register to hold the result of the calculation
+static void GenerateCodeForCalculationCRC32ValueOfBytes(MacroAssembler* masm,
+                                                        const Register& crc,
+                                                        const Register& ptr,
+                                                        const Register& length,
+                                                        const Register& out) {
   // The algorithm for computing the CRC32 of bytes is:
   //   crc = ~crc
   //   process the first few bytes to make the data pointer 8-byte aligned
@@ -3029,8 +2991,7 @@
   Register len = temps.AcquireW();
   Register array_elem = temps.AcquireW();
 
-  Register out = WRegisterFrom(locations->Out());
-  __ Mvn(out, WRegisterFrom(locations->InAt(0)));
+  __ Mvn(out, crc);
   __ Mov(len, length);
 
   __ Tbz(ptr, 0, &aligned2);
@@ -3095,10 +3056,111 @@
 
   __ Bind(&done);
   __ Mvn(out, out);
+}
+
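
For reference, the helper above emits the hardware form of the ordinary bit-reflected CRC-32. A minimal software model of the value it computes (a sketch, assuming the standard reflected polynomial 0xEDB88320 that the ARMv8 CRC32B/CRC32H/CRC32W/CRC32X instructions implement; `Crc32OfBytes` is a hypothetical name, not part of the patch):

    #include <cstddef>
    #include <cstdint>

    // Bit-at-a-time model; the wider CRC32H/W/X instructions fold 2, 4, or 8
    // bytes per step but yield the same result as this byte loop.
    uint32_t Crc32OfBytes(uint32_t crc, const uint8_t* ptr, size_t length) {
      crc = ~crc;  // Matches the initial Mvn(out, crc).
      for (size_t i = 0; i < length; ++i) {
        crc ^= ptr[i];  // Each CRC32B folds one byte into the accumulator.
        for (int bit = 0; bit < 8; ++bit) {
          crc = (crc >> 1) ^ ((crc & 1u) != 0u ? 0xEDB88320u : 0u);
        }
      }
      return ~crc;  // Matches the final Mvn(out, out).
    }
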
+// The threshold for sizes of arrays to use the library provided implementation
+// of CRC32.updateBytes instead of the intrinsic.
+static constexpr int32_t kCRC32UpdateBytesThreshold = 64 * 1024;
+
+void IntrinsicLocationsBuilderARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+    return;
+  }
+
+  LocationSummary* locations =
+      new (allocator_) LocationSummary(invoke,
+                                       LocationSummary::kCallOnSlowPath,
+                                       kIntrinsified);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RegisterOrConstant(invoke->InputAt(2)));
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister());
+}
+
+// Lower the invoke of CRC32.updateBytes(int crc, byte[] b, int off, int len)
+//
+// Note: The intrinsic is not used if len exceeds a threshold.
+void IntrinsicCodeGeneratorARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
+  DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+  auto masm = GetVIXLAssembler();
+  auto locations = invoke->GetLocations();
+
+  auto slow_path =
+    new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
+  codegen_->AddSlowPath(slow_path);
+
+  Register length = WRegisterFrom(locations->InAt(3));
+  __ Cmp(length, kCRC32UpdateBytesThreshold);
+  __ B(slow_path->GetEntryLabel(), hi);
+
+  const uint32_t array_data_offset =
+      mirror::Array::DataOffset(Primitive::kPrimByte).Uint32Value();
+  Register ptr = XRegisterFrom(locations->GetTemp(0));
+  Register array = XRegisterFrom(locations->InAt(1));
+  auto offset = locations->InAt(2);
+  if (offset.IsConstant()) {
+    int32_t offset_value = offset.GetConstant()->AsIntConstant()->GetValue();
+    __ Add(ptr, array, array_data_offset + offset_value);
+  } else {
+    __ Add(ptr, array, array_data_offset);
+    __ Add(ptr, ptr, XRegisterFrom(offset));
+  }
+
+  Register crc = WRegisterFrom(locations->InAt(0));
+  Register out = WRegisterFrom(locations->Out());
+
+  GenerateCodeForCalculationCRC32ValueOfBytes(masm, crc, ptr, length, out);
 
   __ Bind(slow_path->GetExitLabel());
 }
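
Taken together, the lowering above behaves like the following model (a sketch: `Crc32OfBytes` is the hypothetical software model sketched earlier, `array_data` stands for the array pointer already adjusted by `array_data_offset`, and the slow path stands in for the library implementation of CRC32.updateBytes):

    #include <cstddef>
    #include <cstdint>

    uint32_t Crc32OfBytes(uint32_t crc, const uint8_t* ptr, size_t length);

    constexpr int32_t kThresholdModel = 64 * 1024;  // kCRC32UpdateBytesThreshold

    uint32_t UpdateBytesModel(uint32_t crc, const uint8_t* array_data,
                              int32_t off, int32_t len) {
      if (static_cast<uint32_t>(len) > static_cast<uint32_t>(kThresholdModel)) {
        // The real lowering takes the slow path here (the unsigned 'hi'
        // branch) and calls the library implementation, which returns the
        // same value, so the model simply falls through.
      }
      // ptr = array + array_data_offset + off, covering both the
      // constant-offset and register-offset cases above.
      return Crc32OfBytes(crc, array_data + off, static_cast<size_t>(len));
    }
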
 
+void IntrinsicLocationsBuilderARM64::VisitCRC32UpdateByteBuffer(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+    return;
+  }
+
+  LocationSummary* locations =
+      new (allocator_) LocationSummary(invoke,
+                                       LocationSummary::kNoCall,
+                                       kIntrinsified);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister());
+}
+
+// Lower the invoke of CRC32.updateByteBuffer(int crc, long addr, int off, int len)
+//
+// There is no need to generate code checking whether addr is 0:
+// updateByteBuffer is a private method of java.util.zip.CRC32, which
+// guarantees it is never called from outside the CRC32 class, and the
+// address of a DirectBuffer is always what is passed to it. An empty
+// DirectBuffer implementation might use a zero address, but then its
+// length must also be zero, and the generated code handles a zero
+// length correctly.
+void IntrinsicCodeGeneratorARM64::VisitCRC32UpdateByteBuffer(HInvoke* invoke) {
+  DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+  auto masm = GetVIXLAssembler();
+  auto locations = invoke->GetLocations();
+
+  Register addr = XRegisterFrom(locations->InAt(1));
+  Register ptr = XRegisterFrom(locations->GetTemp(0));
+  __ Add(ptr, addr, XRegisterFrom(locations->InAt(2)));
+
+  Register crc = WRegisterFrom(locations->InAt(0));
+  Register length = WRegisterFrom(locations->InAt(3));
+  Register out = WRegisterFrom(locations->Out());
+  GenerateCodeForCalculationCRC32ValueOfBytes(masm, crc, ptr, length, out);
+}
+
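
The ByteBuffer variant above differs from updateBytes only in how the data pointer is formed: the buffer's native address plus the offset, with no array header adjustment and no length threshold or slow path. A model under the same assumptions as before:

    #include <cstddef>
    #include <cstdint>

    uint32_t Crc32OfBytes(uint32_t crc, const uint8_t* ptr, size_t length);

    // addr is the DirectBuffer's native address (the intrinsic is ARM64-only,
    // so the 64-bit value fits a pointer). As argued above, addr can only be
    // zero for an empty buffer, in which case len is zero and the byte loop
    // never executes.
    uint32_t UpdateByteBufferModel(uint32_t crc, int64_t addr,
                                   int32_t off, int32_t len) {
      const uint8_t* ptr = reinterpret_cast<const uint8_t*>(addr) + off;
      return Crc32OfBytes(crc, ptr, static_cast<size_t>(len));
    }
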
 UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
 
 UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf);
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 88f1457..396ff62 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -229,7 +229,7 @@
     assembler->MaybePoisonHeapReference(tmp);
     __ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex));
     __ Cmp(src_curr_addr, src_stop_addr);
-    __ B(ne, &loop, /* far_target */ false);
+    __ B(ne, &loop, /* is_far_target= */ false);
     __ B(GetExitLabel());
   }
 
@@ -298,10 +298,10 @@
 }
 
 void IntrinsicCodeGeneratorARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -312,10 +312,10 @@
 }
 
 void IntrinsicCodeGeneratorARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -355,7 +355,7 @@
     vixl32::Label end;
     vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end);
     __ Clz(out, in_reg_hi);
-    __ CompareAndBranchIfNonZero(in_reg_hi, final_label, /* far_target */ false);
+    __ CompareAndBranchIfNonZero(in_reg_hi, final_label, /* is_far_target= */ false);
     __ Clz(out, in_reg_lo);
     __ Add(out, out, 32);
     if (end.IsReferenced()) {
@@ -398,7 +398,7 @@
     vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end);
     __ Rbit(out, in_reg_lo);
     __ Clz(out, out);
-    __ CompareAndBranchIfNonZero(in_reg_lo, final_label, /* far_target */ false);
+    __ CompareAndBranchIfNonZero(in_reg_lo, final_label, /* is_far_target= */ false);
     __ Rbit(out, in_reg_hi);
     __ Clz(out, out);
     __ Add(out, out, 32);
@@ -476,7 +476,7 @@
 
   // For positive, zero or NaN inputs, rounding is done.
   __ Cmp(out_reg, 0);
-  __ B(ge, final_label, /* far_target */ false);
+  __ B(ge, final_label, /* is_far_target= */ false);
 
   // Handle input < 0 cases.
   // If input is negative but not a tie, previous result (round to nearest) is valid.
@@ -642,7 +642,7 @@
           __ Add(RegisterFrom(temp), base, Operand(offset));
           MemOperand src(RegisterFrom(temp), 0);
           codegen->GenerateFieldLoadWithBakerReadBarrier(
-              invoke, trg_loc, base, src, /* needs_null_check */ false);
+              invoke, trg_loc, base, src, /* needs_null_check= */ false);
           if (is_volatile) {
             __ Dmb(vixl32::ISH);
           }
@@ -733,22 +733,22 @@
 }
 
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
 }
 
 static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator,
@@ -778,39 +778,39 @@
 
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePut(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+      allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ false, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+      allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ false, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke);
+      allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ true, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+      allocator_, features_, DataType::Type::kReference, /* is_volatile= */ false, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+      allocator_, features_, DataType::Type::kReference, /* is_volatile= */ false, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke);
+      allocator_, features_, DataType::Type::kReference, /* is_volatile= */ true, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+      allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ false, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+      allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ false, invoke);
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
   CreateIntIntIntIntToVoid(
-      allocator_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke);
+      allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ true, invoke);
 }
 
 static void GenUnsafePut(LocationSummary* locations,
@@ -844,7 +844,7 @@
       __ Ldrexd(temp_lo, temp_hi, MemOperand(temp_reg));
       __ Strexd(temp_lo, value_lo, value_hi, MemOperand(temp_reg));
       __ Cmp(temp_lo, 0);
-      __ B(ne, &loop_head, /* far_target */ false);
+      __ B(ne, &loop_head, /* is_far_target= */ false);
     } else {
       __ Strd(value_lo, value_hi, MemOperand(base, offset));
     }
@@ -875,64 +875,64 @@
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePut(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1026,7 +1026,7 @@
     __ Strex(tmp, value, MemOperand(tmp_ptr));
     assembler->MaybeUnpoisonHeapReference(value);
     __ Cmp(tmp, 0);
-    __ B(ne, &loop_head, /* far_target */ false);
+    __ B(ne, &loop_head, /* is_far_target= */ false);
     __ B(GetExitLabel());
   }
 };
@@ -1102,7 +1102,7 @@
     assembler->MaybeUnpoisonHeapReference(value);
   }
   __ Cmp(tmp, 0);
-  __ B(ne, &loop_head, /* far_target */ false);
+  __ B(ne, &loop_head, /* is_far_target= */ false);
 
   __ Bind(loop_exit);
 
@@ -1113,7 +1113,7 @@
   __ Lsr(out, out, WhichPowerOf2(out.GetSizeInBits()));
 
   if (type == DataType::Type::kReference) {
-    codegen->MaybeGenerateMarkingRegisterCheck(/* code */ 128);
+    codegen->MaybeGenerateMarkingRegisterCheck(/* code= */ 128);
   }
 }
 
@@ -1308,23 +1308,23 @@
   __ Ldr(temp_reg, MemOperand(str, temp1));
   __ Ldr(temp2, MemOperand(arg, temp1));
   __ Cmp(temp_reg, temp2);
-  __ B(ne, &find_char_diff, /* far_target */ false);
+  __ B(ne, &find_char_diff, /* is_far_target= */ false);
   __ Add(temp1, temp1, char_size * 2);
 
   __ Ldr(temp_reg, MemOperand(str, temp1));
   __ Ldr(temp2, MemOperand(arg, temp1));
   __ Cmp(temp_reg, temp2);
-  __ B(ne, &find_char_diff_2nd_cmp, /* far_target */ false);
+  __ B(ne, &find_char_diff_2nd_cmp, /* is_far_target= */ false);
   __ Add(temp1, temp1, char_size * 2);
   // With string compression, we have compared 8 bytes, otherwise 4 chars.
   __ Subs(temp0, temp0, (mirror::kUseStringCompression ? 8 : 4));
-  __ B(hi, &loop, /* far_target */ false);
+  __ B(hi, &loop, /* is_far_target= */ false);
   __ B(end);
 
   __ Bind(&find_char_diff_2nd_cmp);
   if (mirror::kUseStringCompression) {
     __ Subs(temp0, temp0, 4);  // 4 bytes previously compared.
-    __ B(ls, end, /* far_target */ false);  // Was the second comparison fully beyond the end?
+    __ B(ls, end, /* is_far_target= */ false);  // Was the second comparison fully beyond the end?
   } else {
     // Without string compression, we can start treating temp0 as signed
     // and rely on the signed comparison below.
@@ -1352,7 +1352,7 @@
   // the remaining string data, so just return length diff (out).
   // The comparison is unsigned for string compression, otherwise signed.
   __ Cmp(temp0, Operand(temp1, vixl32::LSR, (mirror::kUseStringCompression ? 3 : 4)));
-  __ B((mirror::kUseStringCompression ? ls : le), end, /* far_target */ false);
+  __ B((mirror::kUseStringCompression ? ls : le), end, /* is_far_target= */ false);
 
   // Extract the characters and calculate the difference.
   if (mirror::kUseStringCompression) {
@@ -1419,9 +1419,9 @@
     __ Ldrb(temp_reg, MemOperand(temp1, c_char_size, PostIndex));
     __ Ldrh(temp3, MemOperand(temp2, char_size, PostIndex));
     __ Cmp(temp_reg, temp3);
-    __ B(ne, &different_compression_diff, /* far_target */ false);
+    __ B(ne, &different_compression_diff, /* is_far_target= */ false);
     __ Subs(temp0, temp0, 2);
-    __ B(hi, &different_compression_loop, /* far_target */ false);
+    __ B(hi, &different_compression_loop, /* is_far_target= */ false);
     __ B(end);
 
     // Calculate the difference.
@@ -1517,12 +1517,12 @@
   StringEqualsOptimizations optimizations(invoke);
   if (!optimizations.GetArgumentNotNull()) {
     // Check if input is null, return false if it is.
-    __ CompareAndBranchIfZero(arg, &return_false, /* far_target */ false);
+    __ CompareAndBranchIfZero(arg, &return_false, /* is_far_target= */ false);
   }
 
   // Reference equality check, return true if same reference.
   __ Cmp(str, arg);
-  __ B(eq, &return_true, /* far_target */ false);
+  __ B(eq, &return_true, /* is_far_target= */ false);
 
   if (!optimizations.GetArgumentIsString()) {
     // Instanceof check for the argument by comparing class fields.
@@ -1532,7 +1532,7 @@
     __ Ldr(temp, MemOperand(str, class_offset));
     __ Ldr(out, MemOperand(arg, class_offset));
     __ Cmp(temp, out);
-    __ B(ne, &return_false, /* far_target */ false);
+    __ B(ne, &return_false, /* is_far_target= */ false);
   }
 
   // Check if one of the inputs is a const string. Do not special-case both strings
@@ -1555,7 +1555,7 @@
     // Also compares the compression style, if differs return false.
     __ Ldr(temp, MemOperand(arg, count_offset));
     __ Cmp(temp, Operand(mirror::String::GetFlaggedCount(const_string_length, is_compressed)));
-    __ B(ne, &return_false, /* far_target */ false);
+    __ B(ne, &return_false, /* is_far_target= */ false);
   } else {
     // Load `count` fields of this and argument strings.
     __ Ldr(temp, MemOperand(str, count_offset));
@@ -1563,7 +1563,7 @@
     // Check if `count` fields are equal, return false if they're not.
     // Also compares the compression style, if differs return false.
     __ Cmp(temp, out);
-    __ B(ne, &return_false, /* far_target */ false);
+    __ B(ne, &return_false, /* is_far_target= */ false);
   }
 
   // Assertions that must hold in order to compare strings 4 bytes at a time.
@@ -1586,9 +1586,9 @@
       __ Ldrd(temp, temp1, MemOperand(str, offset));
       __ Ldrd(temp2, out, MemOperand(arg, offset));
       __ Cmp(temp, temp2);
-      __ B(ne, &return_false, /* far_label */ false);
+      __ B(ne, &return_false, /* is_far_target= */ false);
       __ Cmp(temp1, out);
-      __ B(ne, &return_false, /* far_label */ false);
+      __ B(ne, &return_false, /* is_far_target= */ false);
       offset += 2u * sizeof(uint32_t);
       remaining_bytes -= 2u * sizeof(uint32_t);
     }
@@ -1596,13 +1596,13 @@
       __ Ldr(temp, MemOperand(str, offset));
       __ Ldr(out, MemOperand(arg, offset));
       __ Cmp(temp, out);
-      __ B(ne, &return_false, /* far_label */ false);
+      __ B(ne, &return_false, /* is_far_target= */ false);
     }
   } else {
     // Return true if both strings are empty. Even with string compression `count == 0` means empty.
     static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
                   "Expecting 0=compressed, 1=uncompressed");
-    __ CompareAndBranchIfZero(temp, &return_true, /* far_target */ false);
+    __ CompareAndBranchIfZero(temp, &return_true, /* is_far_target= */ false);
 
     if (mirror::kUseStringCompression) {
       // For string compression, calculate the number of bytes to compare (not chars).
@@ -1628,10 +1628,10 @@
     __ Ldr(temp2, MemOperand(arg, temp1));
     __ Add(temp1, temp1, Operand::From(sizeof(uint32_t)));
     __ Cmp(out, temp2);
-    __ B(ne, &return_false, /* far_target */ false);
+    __ B(ne, &return_false, /* is_far_target= */ false);
     // With string compression, we have compared 4 bytes, otherwise 2 chars.
     __ Subs(temp, temp, mirror::kUseStringCompression ? 4 : 2);
-    __ B(hi, &loop, /* far_target */ false);
+    __ B(hi, &loop, /* is_far_target= */ false);
   }
 
   // Return true and exit the function.
@@ -1712,7 +1712,7 @@
 }
 
 void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+  GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1728,7 +1728,7 @@
 }
 
 void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+  GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1950,7 +1950,7 @@
     } else {
       if (!optimizations.GetDestinationIsSource()) {
         __ Cmp(src, dest);
-        __ B(ne, &conditions_on_positions_validated, /* far_target */ false);
+        __ B(ne, &conditions_on_positions_validated, /* is_far_target= */ false);
       }
       __ Cmp(RegisterFrom(dest_pos), src_pos_constant);
       __ B(gt, intrinsic_slow_path->GetEntryLabel());
@@ -1958,7 +1958,7 @@
   } else {
     if (!optimizations.GetDestinationIsSource()) {
       __ Cmp(src, dest);
-      __ B(ne, &conditions_on_positions_validated, /* far_target */ false);
+      __ B(ne, &conditions_on_positions_validated, /* is_far_target= */ false);
     }
     if (dest_pos.IsConstant()) {
       int32_t dest_pos_constant = Int32ConstantFrom(dest_pos);
@@ -2018,11 +2018,11 @@
       if (!optimizations.GetSourceIsNonPrimitiveArray()) {
         // /* HeapReference<Class> */ temp1 = src->klass_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+            invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false);
        // Bail out if the source is not a non-primitive array.
         // /* HeapReference<Class> */ temp1 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+            invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
         __ CompareAndBranchIfZero(temp1, intrinsic_slow_path->GetEntryLabel());
         // If heap poisoning is enabled, `temp1` has been unpoisoned
        // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2034,7 +2034,7 @@
 
       // /* HeapReference<Class> */ temp1 = dest->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check */ false);
+          invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check= */ false);
 
       if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
        // Bail out if the destination is not a non-primitive array.
@@ -2046,7 +2046,7 @@
        // temporaries such as `temp1`.
         // /* HeapReference<Class> */ temp2 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check */ false);
+            invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check= */ false);
         __ CompareAndBranchIfZero(temp2, intrinsic_slow_path->GetEntryLabel());
         // If heap poisoning is enabled, `temp2` has been unpoisoned
        // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2060,16 +2060,16 @@
       // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
       // /* HeapReference<Class> */ temp2 = src->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check */ false);
+          invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check= */ false);
       // Note: if heap poisoning is on, we are comparing two unpoisoned references here.
       __ Cmp(temp1, temp2);
 
       if (optimizations.GetDestinationIsTypedObjectArray()) {
         vixl32::Label do_copy;
-        __ B(eq, &do_copy, /* far_target */ false);
+        __ B(eq, &do_copy, /* is_far_target= */ false);
         // /* HeapReference<Class> */ temp1 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+            invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
         // /* HeapReference<Class> */ temp1 = temp1->super_class_
         // We do not need to emit a read barrier for the following
         // heap reference load, as `temp1` is only used in a
@@ -2126,7 +2126,7 @@
 
       if (optimizations.GetDestinationIsTypedObjectArray()) {
         vixl32::Label do_copy;
-        __ B(eq, &do_copy, /* far_target */ false);
+        __ B(eq, &do_copy, /* is_far_target= */ false);
         if (!did_unpoison) {
           assembler->MaybeUnpoisonHeapReference(temp1);
         }
@@ -2148,10 +2148,10 @@
     if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
       // /* HeapReference<Class> */ temp1 = src->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+          invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false);
       // /* HeapReference<Class> */ temp3 = temp1->component_type_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+          invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
       __ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel());
       // If heap poisoning is enabled, `temp3` has been unpoisoned
      // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2179,7 +2179,7 @@
 
     if (length.IsRegister()) {
      // Don't enter the copy loop if the length is zero.
-      __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target */ false);
+      __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target= */ false);
     }
 
     if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
@@ -2256,7 +2256,7 @@
         __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
       }
       __ Cmp(temp1, temp3);
-      __ B(ne, &loop, /* far_target */ false);
+      __ B(ne, &loop, /* is_far_target= */ false);
 
       __ Bind(read_barrier_slow_path->GetExitLabel());
     } else {
@@ -2278,13 +2278,13 @@
         __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
       }
       __ Cmp(temp1, temp3);
-      __ B(ne, &loop, /* far_target */ false);
+      __ B(ne, &loop, /* is_far_target= */ false);
     }
     __ Bind(&done);
   }
 
   // We only need one card marking on the destination array.
-  codegen_->MarkGCCard(temp1, temp2, dest, NoReg, /* value_can_be_null */ false);
+  codegen_->MarkGCCard(temp1, temp2, dest, NoReg, /* can_be_null= */ false);
 
   __ Bind(intrinsic_slow_path->GetExitLabel());
 }
@@ -2814,7 +2814,7 @@
 
   __ Subs(num_chr, srcEnd, srcBegin);
   // Early out for valid zero-length retrievals.
-  __ B(eq, final_label, /* far_target */ false);
+  __ B(eq, final_label, /* is_far_target= */ false);
 
   // src range to copy.
   __ Add(src_ptr, srcObj, value_offset);
@@ -2830,7 +2830,7 @@
     __ Ldr(temp, MemOperand(srcObj, count_offset));
     __ Tst(temp, 1);
     temps.Release(temp);
-    __ B(eq, &compressed_string_preloop, /* far_target */ false);
+    __ B(eq, &compressed_string_preloop, /* is_far_target= */ false);
   }
   __ Add(src_ptr, src_ptr, Operand(srcBegin, vixl32::LSL, 1));
 
@@ -2840,7 +2840,7 @@
   temp = temps.Acquire();
   // Save repairing the value of num_chr on the < 4 character path.
   __ Subs(temp, num_chr, 4);
-  __ B(lt, &remainder, /* far_target */ false);
+  __ B(lt, &remainder, /* is_far_target= */ false);
 
   // Keep the result of the earlier subs, we are going to fetch at least 4 characters.
   __ Mov(num_chr, temp);
@@ -2855,10 +2855,10 @@
   __ Ldr(temp, MemOperand(src_ptr, char_size * 4, PostIndex));
   __ Str(temp, MemOperand(dst_ptr, char_size * 4, PostIndex));
   temps.Release(temp);
-  __ B(ge, &loop, /* far_target */ false);
+  __ B(ge, &loop, /* is_far_target= */ false);
 
   __ Adds(num_chr, num_chr, 4);
-  __ B(eq, final_label, /* far_target */ false);
+  __ B(eq, final_label, /* is_far_target= */ false);
 
   // Main loop for < 4 character case and remainder handling. Loads and stores one
   // 16-bit Java character at a time.
@@ -2868,7 +2868,7 @@
   __ Subs(num_chr, num_chr, 1);
   __ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex));
   temps.Release(temp);
-  __ B(gt, &remainder, /* far_target */ false);
+  __ B(gt, &remainder, /* is_far_target= */ false);
 
   if (mirror::kUseStringCompression) {
     __ B(final_label);
@@ -2884,7 +2884,7 @@
     __ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex));
     temps.Release(temp);
     __ Subs(num_chr, num_chr, 1);
-    __ B(gt, &compressed_string_loop, /* far_target */ false);
+    __ B(gt, &compressed_string_loop, /* is_far_target= */ false);
   }
 
   if (done.IsReferenced()) {
@@ -3004,7 +3004,7 @@
     __ Add(out, in, -info.low);
     __ Cmp(out, info.length);
     vixl32::Label allocate, done;
-    __ B(hs, &allocate, /* is_far_target */ false);
+    __ B(hs, &allocate, /* is_far_target= */ false);
     // If the value is within the bounds, load the j.l.Integer directly from the array.
     codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_reference);
     codegen_->LoadFromShiftedRegOffset(DataType::Type::kReference, locations->Out(), temp, out);
@@ -3037,7 +3037,7 @@
   vixl32::Register temp = temps.Acquire();
   vixl32::Label done;
   vixl32::Label* const final_label = codegen_->GetFinalLabel(invoke, &done);
-  __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+  __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
   __ Dmb(vixl32::ISH);
   __ Mov(temp, 0);
   assembler->StoreToOffset(kStoreWord, temp, tr, offset);
@@ -3061,6 +3061,7 @@
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32Update)
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32UpdateByteBuffer)
 
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 08ba0a0..5b35974 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -185,7 +185,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 // int java.lang.Float.floatToRawIntBits(float)
@@ -194,7 +194,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -226,7 +226,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 // float java.lang.Float.intBitsToFloat(int)
@@ -235,7 +235,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 static void CreateIntToIntLocations(ArenaAllocator* allocator,
@@ -411,7 +411,7 @@
              DataType::Type::kInt32,
              IsR2OrNewer(),
              IsR6(),
-             /* reverseBits */ false,
+             /* reverseBits= */ false,
              GetAssembler());
 }
 
@@ -425,7 +425,7 @@
              DataType::Type::kInt64,
              IsR2OrNewer(),
              IsR6(),
-             /* reverseBits */ false,
+             /* reverseBits= */ false,
              GetAssembler());
 }
 
@@ -439,7 +439,7 @@
              DataType::Type::kInt16,
              IsR2OrNewer(),
              IsR6(),
-             /* reverseBits */ false,
+             /* reverseBits= */ false,
              GetAssembler());
 }
 
@@ -479,7 +479,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler());
+  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ false, IsR6(), GetAssembler());
 }
 
 // int java.lang.Long.numberOfLeadingZeros(long i)
@@ -488,7 +488,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler());
+  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ true, IsR6(), GetAssembler());
 }
 
 static void GenNumberOfTrailingZeroes(LocationSummary* locations,
@@ -566,7 +566,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler());
+  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ false, IsR6(), GetAssembler());
 }
 
 // int java.lang.Long.numberOfTrailingZeros(long i)
@@ -575,7 +575,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler());
+  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ true, IsR6(), GetAssembler());
 }
 
 // int java.lang.Integer.reverse(int)
@@ -588,7 +588,7 @@
              DataType::Type::kInt32,
              IsR2OrNewer(),
              IsR6(),
-             /* reverseBits */ true,
+             /* reverseBits= */ true,
              GetAssembler());
 }
 
@@ -602,7 +602,7 @@
              DataType::Type::kInt64,
              IsR2OrNewer(),
              IsR6(),
-             /* reverseBits */ true,
+             /* reverseBits= */ true,
              GetAssembler());
 }
 
@@ -1055,11 +1055,11 @@
           codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
                                                              trg_loc,
                                                              base,
-                                                             /* offset */ 0U,
-                                                             /* index */ offset_loc,
+                                                             /* offset= */ 0U,
+                                                             /* index= */ offset_loc,
                                                              TIMES_1,
                                                              temp,
-                                                             /* needs_null_check */ false);
+                                                             /* needs_null_check= */ false);
           if (is_volatile) {
             __ Sync(0);
           }
@@ -1077,8 +1077,8 @@
                                            trg_loc,
                                            trg_loc,
                                            base_loc,
-                                           /* offset */ 0U,
-                                           /* index */ offset_loc);
+                                           /* offset= */ 0U,
+                                           /* index= */ offset_loc);
         }
       } else {
         if (is_R6) {
@@ -1107,7 +1107,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, IsR6(), codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, IsR6(), codegen_);
 }
 
 // int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
@@ -1116,7 +1116,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, IsR6(), codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, IsR6(), codegen_);
 }
 
 // long sun.misc.Unsafe.getLong(Object o, long offset)
@@ -1125,7 +1125,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, IsR6(), codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, IsR6(), codegen_);
 }
 
 // Object sun.misc.Unsafe.getObject(Object o, long offset)
@@ -1134,7 +1134,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, IsR6(), codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, IsR6(), codegen_);
 }
 
 // Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
@@ -1143,7 +1143,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, IsR6(), codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, IsR6(), codegen_);
 }
 
 static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -1225,8 +1225,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                IsR6(),
                codegen_);
 }
@@ -1239,8 +1239,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                IsR6(),
                codegen_);
 }
@@ -1253,8 +1253,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                IsR6(),
                codegen_);
 }
@@ -1267,8 +1267,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                IsR6(),
                codegen_);
 }
@@ -1281,8 +1281,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                IsR6(),
                codegen_);
 }
@@ -1295,8 +1295,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                IsR6(),
                codegen_);
 }
@@ -1309,8 +1309,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                IsR6(),
                codegen_);
 }
@@ -1323,8 +1323,8 @@
 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                IsR6(),
                codegen_);
 }
@@ -1388,12 +1388,12 @@
           invoke,
           out_loc,  // Unused, used only as a "temporary" within the read barrier.
           base,
-          /* offset */ 0u,
-          /* index */ offset_loc,
+          /* offset= */ 0u,
+          /* index= */ offset_loc,
           ScaleFactor::TIMES_1,
           temp,
-          /* needs_null_check */ false,
-          /* always_update_field */ true);
+          /* needs_null_check= */ false,
+          /* always_update_field= */ true);
     }
   }
 
@@ -1706,7 +1706,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, /* start_at_zero */ true, GetAssembler(), codegen_);
+  GenerateStringIndexOf(invoke, /* start_at_zero= */ true, GetAssembler(), codegen_);
 }
 
 // int java.lang.String.indexOf(int ch, int fromIndex)
@@ -1727,7 +1727,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, /* start_at_zero */ false, GetAssembler(), codegen_);
+  GenerateStringIndexOf(invoke, /* start_at_zero= */ false, GetAssembler(), codegen_);
 }
 
 // java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -2698,6 +2698,7 @@
 
 UNIMPLEMENTED_INTRINSIC(MIPS, CRC32Update)
 UNIMPLEMENTED_INTRINSIC(MIPS, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(MIPS, CRC32UpdateByteBuffer)
 
 UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 59d3ba2..afaa4ca 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -169,7 +169,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 // int java.lang.Float.floatToRawIntBits(float)
@@ -178,7 +178,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -205,7 +205,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 // float java.lang.Float.intBitsToFloat(int)
@@ -214,7 +214,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -295,7 +295,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 // int java.lang.Long.numberOfLeadingZeros(long i)
@@ -304,7 +304,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 static void GenNumberOfTrailingZeroes(LocationSummary* locations,
@@ -332,7 +332,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 // int java.lang.Long.numberOfTrailingZeros(long i)
@@ -341,7 +341,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 static void GenReverse(LocationSummary* locations,
@@ -911,11 +911,11 @@
           codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
                                                              trg_loc,
                                                              base,
-                                                             /* offset */ 0U,
-                                                             /* index */ offset_loc,
+                                                             /* offset= */ 0U,
+                                                             /* index= */ offset_loc,
                                                              TIMES_1,
                                                              temp,
-                                                             /* needs_null_check */ false);
+                                                             /* needs_null_check= */ false);
           if (is_volatile) {
             __ Sync(0);
           }
@@ -928,8 +928,8 @@
                                            trg_loc,
                                            trg_loc,
                                            base_loc,
-                                           /* offset */ 0U,
-                                           /* index */ offset_loc);
+                                           /* offset= */ 0U,
+                                           /* index= */ offset_loc);
         }
       } else {
         __ Lwu(trg, TMP, 0);
@@ -952,7 +952,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 
 // int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
@@ -961,7 +961,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
 }
 
 // long sun.misc.Unsafe.getLong(Object o, long offset)
@@ -970,7 +970,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 
 // long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
@@ -979,7 +979,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
 }
 
 // Object sun.misc.Unsafe.getObject(Object o, long offset)
@@ -988,7 +988,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 
 // Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
@@ -997,7 +997,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
 }
 
 static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -1067,8 +1067,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1080,8 +1080,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 
@@ -1093,8 +1093,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt32,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1106,8 +1106,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1119,8 +1119,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 
@@ -1132,8 +1132,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kReference,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1145,8 +1145,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ false,
+               /* is_volatile= */ false,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1158,8 +1158,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ false,
-               /* is_ordered */ true,
+               /* is_volatile= */ false,
+               /* is_ordered= */ true,
                codegen_);
 }
 
@@ -1171,8 +1171,8 @@
 void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
   GenUnsafePut(invoke->GetLocations(),
                DataType::Type::kInt64,
-               /* is_volatile */ true,
-               /* is_ordered */ false,
+               /* is_volatile= */ true,
+               /* is_ordered= */ false,
                codegen_);
 }
 
@@ -1234,12 +1234,12 @@
           invoke,
           out_loc,  // Unused, used only as a "temporary" within the read barrier.
           base,
-          /* offset */ 0u,
-          /* index */ offset_loc,
+          /* offset= */ 0u,
+          /* index= */ offset_loc,
           ScaleFactor::TIMES_1,
           temp,
-          /* needs_null_check */ false,
-          /* always_update_field */ true);
+          /* needs_null_check= */ false,
+          /* always_update_field= */ true);
     }
   }
 
@@ -1548,7 +1548,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
 }
 
 // int java.lang.String.indexOf(int ch, int fromIndex)
@@ -1566,7 +1566,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
 }
 
 // java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -1667,7 +1667,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
-  GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  GenIsInfinite(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 // boolean java.lang.Double.isInfinite(double)
@@ -1676,7 +1676,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
-  GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  GenIsInfinite(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 // void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
@@ -2348,6 +2348,7 @@
 UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
 UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32Update)
 UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32UpdateByteBuffer)
 
 UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter);
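
Note: the hunks above (and throughout this change) migrate bare `/* name */` argument
comments to the `/* name= */` form. A minimal sketch of what the trailing `=` buys
(`GenCount` here is a hypothetical helper, not part of this change): tooling such as
clang-tidy's bugprone-argument-comment treats a leading `/* name= */` comment as a
claim about the callee's parameter and can flag it when it does not match.

static void GenCount(bool is64bit, bool is_volatile);

void Demo() {
  // Old style: free text; no tool relates the comment to a parameter.
  GenCount(/* is64bit */ true, /* is_volatile */ false);
  // New style: the names are checkable; writing /* is_volatile= */ in the
  // first position would be flagged as a mismatched argument comment.
  GenCount(/* is64bit= */ true, /* is_volatile= */ false);
}
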
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 1d94950..8747f06 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -223,31 +223,31 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  CreateFPToIntLocations(allocator_, invoke, /* is64bit */ true);
+  CreateFPToIntLocations(allocator_, invoke, /* is64bit= */ true);
 }
 void IntrinsicLocationsBuilderX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  CreateIntToFPLocations(allocator_, invoke, /* is64bit */ true);
+  CreateIntToFPLocations(allocator_, invoke, /* is64bit= */ true);
 }
 
 void IntrinsicCodeGeneratorX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 void IntrinsicCodeGeneratorX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 void IntrinsicLocationsBuilderX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  CreateFPToIntLocations(allocator_, invoke, /* is64bit */ false);
+  CreateFPToIntLocations(allocator_, invoke, /* is64bit= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  CreateIntToFPLocations(allocator_, invoke, /* is64bit */ false);
+  CreateIntToFPLocations(allocator_, invoke, /* is64bit= */ false);
 }
 
 void IntrinsicCodeGeneratorX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 void IntrinsicCodeGeneratorX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -1317,19 +1317,19 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) {
-  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
+  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ true);
 }
 
 void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
 }
 
 void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
-  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
+  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ false);
 }
 
 void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
 }
 
 void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1691,7 +1691,7 @@
         if (kUseBakerReadBarrier) {
           Address src(base, offset, ScaleFactor::TIMES_1, 0);
           codegen->GenerateReferenceLoadWithBakerReadBarrier(
-              invoke, output_loc, base, src, /* needs_null_check */ false);
+              invoke, output_loc, base, src, /* needs_null_check= */ false);
         } else {
           __ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
           codegen->GenerateReadBarrierSlow(
@@ -1762,45 +1762,45 @@
 
 void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) {
   CreateIntIntIntToIntLocations(
-      allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ false);
+      allocator_, invoke, DataType::Type::kInt32, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ true);
+  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile= */ true);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) {
   CreateIntIntIntToIntLocations(
-      allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ false);
+      allocator_, invoke, DataType::Type::kInt64, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ true);
+  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile= */ true);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) {
   CreateIntIntIntToIntLocations(
-      allocator_, invoke, DataType::Type::kReference, /* is_volatile */ false);
+      allocator_, invoke, DataType::Type::kReference, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
   CreateIntIntIntToIntLocations(
-      allocator_, invoke, DataType::Type::kReference, /* is_volatile */ true);
+      allocator_, invoke, DataType::Type::kReference, /* is_volatile= */ true);
 }
 
 
 void IntrinsicCodeGeneratorX86::VisitUnsafeGet(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafeGetLong(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafeGetObject(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
 }
 
 
@@ -1827,39 +1827,39 @@
 
 void IntrinsicLocationsBuilderX86::VisitUnsafePut(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+      allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutOrdered(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+      allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutVolatile(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ true);
+      allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ true);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutObject(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+      allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+      allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kReference, invoke, /* is_volatile */ true);
+      allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ true);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutLong(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+      allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+      allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ false);
 }
 void IntrinsicLocationsBuilderX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
   CreateIntIntIntIntToVoidPlusTempsLocations(
-      allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ true);
+      allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ true);
 }
 
 // We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86
@@ -1911,34 +1911,34 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitUnsafePut(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutOrdered(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutVolatile(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutObject(HInvoke* invoke) {
   GenUnsafePut(
-      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   GenUnsafePut(
-      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   GenUnsafePut(
-      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ true, codegen_);
+      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutLong(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
 }
 
 static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
@@ -2035,8 +2035,8 @@
           temp1_loc,  // Unused, used only as a "temporary" within the read barrier.
           base,
           field_addr,
-          /* needs_null_check */ false,
-          /* always_update_field */ true,
+          /* needs_null_check= */ false,
+          /* always_update_field= */ true,
           &temp2);
     }
 
@@ -2267,19 +2267,19 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitIntegerBitCount(HInvoke* invoke) {
-  CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ false);
+  CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicCodeGeneratorX86::VisitIntegerBitCount(HInvoke* invoke) {
-  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ false);
+  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86::VisitLongBitCount(HInvoke* invoke) {
-  CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ true);
+  CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long= */ true);
 }
 
 void IntrinsicCodeGeneratorX86::VisitLongBitCount(HInvoke* invoke) {
-  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
+  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ true);
 }
 
 static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
@@ -2371,19 +2371,19 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ false);
+  CreateLeadingZeroLocations(allocator_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ true);
+  CreateLeadingZeroLocations(allocator_, invoke, /* is_long= */ true);
 }
 
 void IntrinsicCodeGeneratorX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
 }
 
 static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
@@ -2462,19 +2462,19 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ false);
+  CreateTrailingZeroLocations(allocator_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ true);
+  CreateTrailingZeroLocations(allocator_, invoke, /* is_long= */ true);
 }
 
 void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
 }
 
 static bool IsSameInput(HInstruction* instruction, size_t input0, size_t input1) {
@@ -2682,11 +2682,11 @@
       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ temp1 = src->klass_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+            invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
         // Bail out if the source is not a non primitive array.
         // /* HeapReference<Class> */ temp1 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+            invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
         __ testl(temp1, temp1);
         __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
         // If heap poisoning is enabled, `temp1` has been unpoisoned
@@ -2719,7 +2719,7 @@
 
       // /* HeapReference<Class> */ temp1 = dest->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp1_loc, dest, class_offset, /* needs_null_check */ false);
+          invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false);
 
       if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
         // Bail out if the destination is not a non primitive array.
@@ -2731,7 +2731,7 @@
         // temporaries such as `temp1`.
         // /* HeapReference<Class> */ temp2 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp2_loc, temp1, component_offset, /* needs_null_check */ false);
+            invoke, temp2_loc, temp1, component_offset, /* needs_null_check= */ false);
         __ testl(temp2, temp2);
         __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
         // If heap poisoning is enabled, `temp2` has been unpoisoned
@@ -2744,7 +2744,7 @@
       // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
       // /* HeapReference<Class> */ temp2 = src->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp2_loc, src, class_offset, /* needs_null_check */ false);
+          invoke, temp2_loc, src, class_offset, /* needs_null_check= */ false);
       // Note: if heap poisoning is on, we are comparing two unpoisoned references here.
       __ cmpl(temp1, temp2);
 
@@ -2753,7 +2753,7 @@
         __ j(kEqual, &do_copy);
         // /* HeapReference<Class> */ temp1 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+            invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
         // We do not need to emit a read barrier for the following
         // heap reference load, as `temp1` is only used in a
         // comparison with null below, and this reference is not
@@ -2807,10 +2807,10 @@
     if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
       // /* HeapReference<Class> */ temp1 = src->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+          invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
       // /* HeapReference<Class> */ temp1 = temp1->component_type_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+          invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
       __ testl(temp1, temp1);
       __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
       // If heap poisoning is enabled, `temp1` has been unpoisoned
@@ -2943,7 +2943,7 @@
   }
 
   // We only need one card marking on the destination array.
-  codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null */ false);
+  codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null= */ false);
 
   __ Bind(intrinsic_slow_path->GetExitLabel());
 }
@@ -3072,6 +3072,7 @@
 UNIMPLEMENTED_INTRINSIC(X86, LongHighestOneBit)
 UNIMPLEMENTED_INTRINSIC(X86, CRC32Update)
 UNIMPLEMENTED_INTRINSIC(X86, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(X86, CRC32UpdateByteBuffer)
 
 UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOfAfter);
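
Note: the added UNIMPLEMENTED_INTRINSIC(X86, CRC32UpdateByteBuffer) line marks the
new intrinsic as not implemented for this backend. A rough sketch of the macro's
shape, for illustration only (the authoritative definition lives in the
per-architecture intrinsics sources and may differ in detail): both visitor methods
stay empty, so no LocationSummary is created and the invoke falls back to a plain
call of the library method.

#define UNIMPLEMENTED_INTRINSIC(Arch, Name)                    \
  void IntrinsicLocationsBuilder##Arch::Visit##Name(           \
      HInvoke* invoke ATTRIBUTE_UNUSED) {}                     \
  void IntrinsicCodeGenerator##Arch::Visit##Name(              \
      HInvoke* invoke ATTRIBUTE_UNUSED) {}
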
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 4f0b61d..167c1d8 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -162,10 +162,10 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 void IntrinsicCodeGeneratorX86_64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -176,10 +176,10 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 void IntrinsicCodeGeneratorX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
 }
 
 static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -430,12 +430,12 @@
   // direct x86 instruction, since NaN should map to 0 and large positive
   // values need to be clipped to the extreme value.
   codegen_->Load64BitValue(out, kPrimLongMax);
-  __ cvtsi2sd(t2, out, /* is64bit */ true);
+  __ cvtsi2sd(t2, out, /* is64bit= */ true);
   __ comisd(t1, t2);
   __ j(kAboveEqual, &done);  // clipped to max (already in out), does not jump on unordered
   __ movl(out, Immediate(0));  // does not change flags, implicit zero extension to 64-bit
   __ j(kUnordered, &done);  // NaN mapped to 0 (just moved in out)
-  __ cvttsd2si(out, t1, /* is64bit */ true);
+  __ cvttsd2si(out, t1, /* is64bit= */ true);
   __ Bind(&done);
 }
 
@@ -979,7 +979,7 @@
     if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
       // /* HeapReference<Class> */ temp1 = dest->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp1_loc, dest, class_offset, /* needs_null_check */ false);
+          invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false);
       // Register `temp1` is not trashed by the read barrier emitted
       // by GenerateFieldLoadWithBakerReadBarrier below, as that
       // method produces a call to a ReadBarrierMarkRegX entry point,
@@ -987,7 +987,7 @@
       // temporaries such as `temp1`.
       // /* HeapReference<Class> */ temp2 = src->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp2_loc, src, class_offset, /* needs_null_check */ false);
+          invoke, temp2_loc, src, class_offset, /* needs_null_check= */ false);
       // If heap poisoning is enabled, `temp1` and `temp2` have been
       // unpoisoned by the previous calls to
       // GenerateFieldLoadWithBakerReadBarrier.
@@ -1011,7 +1011,7 @@
       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ TMP = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, TMP_loc, temp1, component_offset, /* needs_null_check */ false);
+            invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false);
         __ testl(CpuRegister(TMP), CpuRegister(TMP));
         __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
         // If heap poisoning is enabled, `TMP` has been unpoisoned by
@@ -1034,7 +1034,7 @@
         // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
         // /* HeapReference<Class> */ TMP = temp2->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, TMP_loc, temp2, component_offset, /* needs_null_check */ false);
+            invoke, TMP_loc, temp2, component_offset, /* needs_null_check= */ false);
         __ testl(CpuRegister(TMP), CpuRegister(TMP));
         __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
         // If heap poisoning is enabled, `TMP` has been unpoisoned by
@@ -1058,7 +1058,7 @@
       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ temp1 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
-            invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+            invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
         // We do not need to emit a read barrier for the following
         // heap reference load, as `temp1` is only used in a
         // comparison with null below, and this reference is not
@@ -1086,10 +1086,10 @@
     if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
       // /* HeapReference<Class> */ temp1 = src->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+          invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
       // /* HeapReference<Class> */ TMP = temp1->component_type_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
-          invoke, TMP_loc, temp1, component_offset, /* needs_null_check */ false);
+          invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false);
       __ testl(CpuRegister(TMP), CpuRegister(TMP));
       __ j(kEqual, intrinsic_slow_path->GetEntryLabel());
     } else {
@@ -1198,7 +1198,7 @@
   }
 
   // We only need one card marking on the destination array.
-  codegen_->MarkGCCard(temp1, temp2, dest, CpuRegister(kNoRegister), /* value_can_be_null */ false);
+  codegen_->MarkGCCard(temp1, temp2, dest, CpuRegister(kNoRegister), /* value_can_be_null= */ false);
 
   __ Bind(intrinsic_slow_path->GetExitLabel());
 }
@@ -1444,7 +1444,7 @@
     // Ensure we have a start index >= 0;
     __ xorl(counter, counter);
     __ cmpl(start_index, Immediate(0));
-    __ cmov(kGreater, counter, start_index, /* is64bit */ false);  // 32-bit copy is enough.
+    __ cmov(kGreater, counter, start_index, /* is64bit= */ false);  // 32-bit copy is enough.
 
     if (mirror::kUseStringCompression) {
       NearLabel modify_counter, offset_uncompressed_label;
@@ -1506,19 +1506,19 @@
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitStringIndexOf(HInvoke* invoke) {
-  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
+  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ true);
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
-  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
+  CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ false);
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1832,7 +1832,7 @@
 void IntrinsicCodeGeneratorX86_64::VisitThreadCurrentThread(HInvoke* invoke) {
   CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
   GetAssembler()->gs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86_64PointerSize>(),
-                                                    /* no_rip */ true));
+                                                    /* no_rip= */ true));
 }
 
 static void GenUnsafeGet(HInvoke* invoke,
@@ -1858,7 +1858,7 @@
         if (kUseBakerReadBarrier) {
           Address src(base, offset, ScaleFactor::TIMES_1, 0);
           codegen->GenerateReferenceLoadWithBakerReadBarrier(
-              invoke, output_loc, base, src, /* needs_null_check */ false);
+              invoke, output_loc, base, src, /* needs_null_check= */ false);
         } else {
           __ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
           codegen->GenerateReadBarrierSlow(
@@ -1922,22 +1922,22 @@
 
 
 void IntrinsicCodeGeneratorX86_64::VisitUnsafeGet(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLong(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObject(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
 }
 
 
@@ -2020,34 +2020,34 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePut(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutOrdered(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutVolatile(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObject(HInvoke* invoke) {
   GenUnsafePut(
-      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
   GenUnsafePut(
-      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
   GenUnsafePut(
-      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ true, codegen_);
+      invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ true, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLong(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
 }
 void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+  GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
 }
 
 static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
@@ -2132,8 +2132,8 @@
           out_loc,  // Unused, used only as a "temporary" within the read barrier.
           base,
           field_addr,
-          /* needs_null_check */ false,
-          /* always_update_field */ true,
+          /* needs_null_check= */ false,
+          /* always_update_field= */ true,
           &temp1,
           &temp2);
     }
@@ -2361,7 +2361,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitIntegerBitCount(HInvoke* invoke) {
-  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ false);
+  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitLongBitCount(HInvoke* invoke) {
@@ -2369,7 +2369,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitLongBitCount(HInvoke* invoke) {
-  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
+  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ true);
 }
 
 static void CreateOneBitLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_high) {
@@ -2476,35 +2476,35 @@
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
-  CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
+  CreateOneBitLocations(allocator_, invoke, /* is_high= */ true);
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
-  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ true, /* is_long */ false);
+  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ true, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
-  CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
+  CreateOneBitLocations(allocator_, invoke, /* is_high= */ true);
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
-  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ true, /* is_long */ true);
+  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ true, /* is_long= */ true);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
-  CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
+  CreateOneBitLocations(allocator_, invoke, /* is_high= */ false);
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
-  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ false);
+  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ false, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
-  CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
+  CreateOneBitLocations(allocator_, invoke, /* is_high= */ false);
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
-  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ true);
+  GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ false, /* is_long= */ true);
 }
 
 static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -2569,7 +2569,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -2577,7 +2577,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
 }
 
 static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -2637,7 +2637,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -2645,7 +2645,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
@@ -2716,7 +2716,7 @@
   X86_64Assembler* assembler = GetAssembler();
   CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
   Address address = Address::Absolute
-      (Thread::InterruptedOffset<kX86_64PointerSize>().Int32Value(), /* no_rip */ true);
+      (Thread::InterruptedOffset<kX86_64PointerSize>().Int32Value(), /* no_rip= */ true);
   NearLabel done;
   __ gs()->movl(out, address);
   __ testl(out, out);
@@ -2739,6 +2739,7 @@
 UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86_64, CRC32Update)
 UNIMPLEMENTED_INTRINSIC(X86_64, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(X86_64, CRC32UpdateByteBuffer)
 
 UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOfAfter);
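
Note: one hunk above touches the double-to-long sequence whose comment explains why
the conversion cannot use the x86 instruction directly: cvttsd2si returns the
"integer indefinite" value 0x8000000000000000 for NaN and for overflow in either
direction, which coincides with the required result only for negative overflow. Java
instead maps NaN to 0 and saturates out-of-range values. A portable sketch of those
semantics, assuming only the JLS 5.1.3 narrowing rules:

#include <cmath>
#include <cstdint>
#include <limits>

// Java (long) cast semantics for a double value.
int64_t JavaDoubleToLong(double v) {
  constexpr int64_t kMax = std::numeric_limits<int64_t>::max();
  constexpr int64_t kMin = std::numeric_limits<int64_t>::min();
  if (std::isnan(v)) return 0;                      // NaN maps to zero.
  if (v >= static_cast<double>(kMax)) return kMax;  // Saturate positive overflow.
  if (v <= static_cast<double>(kMin)) return kMin;  // Saturate negative overflow.
  return static_cast<int64_t>(v);                   // In range: truncate.
}

This mirrors the emitted code: load kPrimLongMax, compare, clip on kAboveEqual, zero
the output and exit on kUnordered (the NaN case), otherwise convert with cvttsd2si.
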
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index c7cc661..310d98b 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -30,7 +30,7 @@
       : graph_(CreateGraph()),
         iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)),
         loop_opt_(new (GetAllocator()) HLoopOptimization(
-            graph_, /* compiler_options */ nullptr, iva_, /* stats */ nullptr)) {
+            graph_, /* compiler_options= */ nullptr, iva_, /* stats= */ nullptr)) {
     BuildGraph();
   }
 
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index d1fba31..e9a2f96 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -44,7 +44,7 @@
   // Create the inexact Object reference type and store it in the HGraph.
   inexact_object_rti_ = ReferenceTypeInfo::Create(
       handles->NewHandle(GetClassRoot<mirror::Object>()),
-      /* is_exact */ false);
+      /* is_exact= */ false);
 }
 
 void HGraph::AddBlock(HBasicBlock* block) {
@@ -60,7 +60,7 @@
   ScopedArenaAllocator allocator(GetArenaStack());
   // Nodes that we're currently visiting, indexed by block id.
   ArenaBitVector visiting(
-      &allocator, blocks_.size(), /* expandable */ false, kArenaAllocGraphBuilder);
+      &allocator, blocks_.size(), /* expandable= */ false, kArenaAllocGraphBuilder);
   visiting.ClearAllBits();
   // Number of successors visited from a given node, indexed by block id.
   ScopedArenaVector<size_t> successors_visited(blocks_.size(),
@@ -826,7 +826,7 @@
     ScopedArenaAllocator allocator(graph->GetArenaStack());
     ArenaBitVector visited(&allocator,
                            graph->GetBlocks().size(),
-                           /* expandable */ false,
+                           /* expandable= */ false,
                            kArenaAllocGraphBuilder);
     visited.ClearAllBits();
     // Stop marking blocks at the loop header.
@@ -2527,7 +2527,7 @@
         current->SetGraph(outer_graph);
         outer_graph->AddBlock(current);
         outer_graph->reverse_post_order_[++index_of_at] = current;
-        UpdateLoopAndTryInformationOfNewBlock(current, at,  /* replace_if_back_edge */ false);
+        UpdateLoopAndTryInformationOfNewBlock(current, at,  /* replace_if_back_edge= */ false);
       }
     }
 
@@ -2537,7 +2537,7 @@
     outer_graph->reverse_post_order_[++index_of_at] = to;
     // Only `to` can become a back edge, as the inlined blocks
     // are predecessors of `to`.
-    UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge */ true);
+    UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge= */ true);
 
     // Update all predecessors of the exit block (now the `to` block)
     // to not `HReturn` but `HGoto` instead. Special case throwing blocks
@@ -2711,13 +2711,13 @@
   DCHECK((old_pre_header->GetLoopInformation() == nullptr) ||
          !old_pre_header->GetLoopInformation()->IsBackEdge(*old_pre_header));
   UpdateLoopAndTryInformationOfNewBlock(
-      if_block, old_pre_header, /* replace_if_back_edge */ false);
+      if_block, old_pre_header, /* replace_if_back_edge= */ false);
   UpdateLoopAndTryInformationOfNewBlock(
-      true_block, old_pre_header, /* replace_if_back_edge */ false);
+      true_block, old_pre_header, /* replace_if_back_edge= */ false);
   UpdateLoopAndTryInformationOfNewBlock(
-      false_block, old_pre_header, /* replace_if_back_edge */ false);
+      false_block, old_pre_header, /* replace_if_back_edge= */ false);
   UpdateLoopAndTryInformationOfNewBlock(
-      new_pre_header, old_pre_header, /* replace_if_back_edge */ false);
+      new_pre_header, old_pre_header, /* replace_if_back_edge= */ false);
 }
 
 HBasicBlock* HGraph::TransformLoopForVectorization(HBasicBlock* header,
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 13c8684..686a2de 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -3242,7 +3242,7 @@
             SideEffects::All(),
             dex_pc,
             allocator,
-            /* number_of_inputs */ 1,
+            /* number_of_inputs= */ 1,
             kArenaAllocMisc) {
     SetPackedFlag<kFieldCanBeMoved>(false);
     SetPackedField<DeoptimizeKindField>(kind);
@@ -3267,7 +3267,7 @@
             SideEffects::CanTriggerGC(),
             dex_pc,
             allocator,
-            /* number_of_inputs */ 2,
+            /* number_of_inputs= */ 2,
             kArenaAllocMisc) {
     SetPackedFlag<kFieldCanBeMoved>(true);
     SetPackedField<DeoptimizeKindField>(kind);
@@ -4399,7 +4399,7 @@
       : HInvoke(kInvokeUnresolved,
                 allocator,
                 number_of_arguments,
-                0u /* number_of_other_inputs */,
+                /* number_of_other_inputs= */ 0u,
                 return_type,
                 dex_pc,
                 dex_method_index,
@@ -4425,7 +4425,7 @@
       : HInvoke(kInvokePolymorphic,
                 allocator,
                 number_of_arguments,
-                0u /* number_of_other_inputs */,
+                /* number_of_other_inputs= */ 0u,
                 return_type,
                 dex_pc,
                 dex_method_index,
@@ -4451,11 +4451,11 @@
       : HInvoke(kInvokeCustom,
                 allocator,
                 number_of_arguments,
-                /* number_of_other_inputs */ 0u,
+                /* number_of_other_inputs= */ 0u,
                 return_type,
                 dex_pc,
-                /* dex_method_index */ dex::kDexNoIndex,
-                /* resolved_method */ nullptr,
+                /* dex_method_index= */ dex::kDexNoIndex,
+                /* resolved_method= */ nullptr,
                 kStatic),
       call_site_index_(call_site_index) {
   }
@@ -5894,7 +5894,7 @@
                  type,
                  SideEffects::ArrayReadOfType(type),
                  dex_pc,
-                 /* is_string_char_at */ false) {
+                 /* is_string_char_at= */ false) {
   }
 
   HArrayGet(HInstruction* array,
@@ -6336,7 +6336,7 @@
   ReferenceTypeInfo GetLoadedClassRTI() {
     if (GetPackedFlag<kFlagValidLoadedClassRTI>()) {
       // Note: The is_exact flag from the return value should not be used.
-      return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact */ true);
+      return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact= */ true);
     } else {
       return ReferenceTypeInfo::CreateInvalid();
     }
@@ -7089,7 +7089,7 @@
           side_effects,
           dex_pc,
           allocator,
-          /* number_of_inputs */ check_kind == TypeCheckKind::kBitstringCheck ? 4u : 2u,
+          /* number_of_inputs= */ check_kind == TypeCheckKind::kBitstringCheck ? 4u : 2u,
           kArenaAllocTypeCheckInputs),
         klass_(klass) {
     SetPackedField<TypeCheckKindField>(check_kind);
@@ -7145,7 +7145,7 @@
   ReferenceTypeInfo GetTargetClassRTI() {
     if (GetPackedFlag<kFlagValidTargetClassRTI>()) {
       // Note: The is_exact flag from the return value should not be used.
-      return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact */ true);
+      return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact= */ true);
     } else {
       return ReferenceTypeInfo::CreateInvalid();
     }
@@ -7458,7 +7458,7 @@
                                       SideEffects::AllReads(),
                                       dex_pc,
                                       allocator,
-                                      /* number_of_inputs */ 1,
+                                      /* number_of_inputs= */ 1,
                                       kArenaAllocConstructorFenceInputs) {
     DCHECK(fence_object != nullptr);
     SetRawInputAt(0, fence_object);
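
Note: two of the hunks above do more than append an `=`; they also move the comment
from after the value (`0u /* number_of_other_inputs */`) to before it. Placement
matters because argument-comment tooling only associates a comment that immediately
precedes the argument; a trailing comment is plain text. A hypothetical illustration
(`Send` is not an ART API):

#include <cstdint>

void Send(uint32_t flags);

void Demo() {
  Send(0u /* flags */);   // Trailing comment: not matched against a parameter.
  Send(/* flags= */ 0u);  // Leading name= comment: checkable against `flags`.
}
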
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index cd4f45e..efe4d6b 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -207,7 +207,7 @@
                       allocator,
                       packed_type,
                       SideEffects::None(),
-                      /* number_of_inputs */ 1,
+                      /* number_of_inputs= */ 1,
                       vector_length,
                       dex_pc) {
     SetRawInputAt(0, input);
@@ -235,7 +235,7 @@
                       allocator,
                       packed_type,
                       SideEffects::None(),
-                      /* number_of_inputs */ 2,
+                      /* number_of_inputs= */ 2,
                       vector_length,
                       dex_pc) {
     SetRawInputAt(0, left);
@@ -948,7 +948,7 @@
                       allocator,
                       packed_type,
                       SideEffects::None(),
-                      /* number_of_inputs */ 3,
+                      /* number_of_inputs= */ 3,
                       vector_length,
                       dex_pc),
         op_kind_(op) {
@@ -1002,7 +1002,7 @@
                       allocator,
                       packed_type,
                       SideEffects::None(),
-                      /* number_of_inputs */ 3,
+                      /* number_of_inputs= */ 3,
                       vector_length,
                       dex_pc) {
     DCHECK(HasConsistentPackedTypes(accumulator, packed_type));
@@ -1049,7 +1049,7 @@
                     allocator,
                     packed_type,
                     SideEffects::None(),
-                    /* number_of_inputs */ 3,
+                    /* number_of_inputs= */ 3,
                     vector_length,
                     dex_pc) {
     DCHECK(HasConsistentPackedTypes(accumulator, packed_type));
@@ -1097,7 +1097,7 @@
                             allocator,
                             packed_type,
                             side_effects,
-                            /* number_of_inputs */ 2,
+                            /* number_of_inputs= */ 2,
                             vector_length,
                             dex_pc) {
     SetRawInputAt(0, base);
@@ -1143,7 +1143,7 @@
                             allocator,
                             packed_type,
                             side_effects,
-                            /* number_of_inputs */ 3,
+                            /* number_of_inputs= */ 3,
                             vector_length,
                             dex_pc) {
     DCHECK(HasConsistentPackedTypes(value, packed_type));
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index b75afad..8864a12 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -260,9 +260,9 @@
                                        handles,
                                        stats,
                                        accessor.RegistersSize(),
-                                       /* total_number_of_instructions */ 0,
-                                       /* parent */ nullptr,
-                                       /* depth */ 0,
+                                       /* total_number_of_instructions= */ 0,
+                                       /* parent= */ nullptr,
+                                       /* depth= */ 0,
                                        pass_name);
         break;
       }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index c9b4d36..a8fa370 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -162,7 +162,7 @@
     VLOG(compiler) << "Starting pass: " << pass_name;
     // Dump graph first, then start timer.
     if (visualizer_enabled_) {
-      visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_);
+      visualizer_.DumpGraph(pass_name, /* is_after_pass= */ false, graph_in_bad_state_);
       FlushVisualizer();
     }
     if (timing_logger_enabled_) {
@@ -184,7 +184,7 @@
       timing_logger_.EndTiming();
     }
     if (visualizer_enabled_) {
-      visualizer_.DumpGraph(pass_name, /* is_after_pass */ true, graph_in_bad_state_);
+      visualizer_.DumpGraph(pass_name, /* is_after_pass= */ true, graph_in_bad_state_);
       FlushVisualizer();
     }
 
@@ -964,7 +964,7 @@
       compiler_options.GetInstructionSet(),
       kInvalidInvokeType,
       compiler_options.GetDebuggable(),
-      /* osr */ false);
+      /* osr= */ false);
 
   DCHECK(Runtime::Current()->IsAotCompiler());
   DCHECK(method != nullptr);
@@ -994,7 +994,7 @@
                           &dex_compilation_unit,
                           codegen.get(),
                           compilation_stats_.get(),
-                          /* interpreter_metadata */ ArrayRef<const uint8_t>(),
+                          /* interpreter_metadata= */ ArrayRef<const uint8_t>(),
                           handles);
     builder.BuildIntrinsicGraph(method);
   }
@@ -1161,7 +1161,7 @@
       jni_compiled_method.GetFrameSize(),
       jni_compiled_method.GetCoreSpillMask(),
       jni_compiled_method.GetFpSpillMask(),
-      /* num_dex_registers */ 0);
+      /* num_dex_registers= */ 0);
   stack_map_stream->EndMethod();
   return stack_map_stream->Encode();
 }
@@ -1208,7 +1208,7 @@
         CompiledMethod* compiled_method = Emit(&allocator,
                                                &code_allocator,
                                                codegen.get(),
-                                               /* code_item_for_osr_check */ nullptr);
+                                               /* item= */ nullptr);
         compiled_method->MarkAsIntrinsic();
         return compiled_method;
       }
@@ -1228,7 +1228,7 @@
       jni_compiled_method.GetCode(),
       ArrayRef<const uint8_t>(stack_map),
       jni_compiled_method.GetCfi(),
-      /* patches */ ArrayRef<const linker::LinkerPatch>());
+      /* patches= */ ArrayRef<const linker::LinkerPatch>());
 }
 
 Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
@@ -1277,7 +1277,7 @@
     uint8_t* roots_data = nullptr;
     uint32_t data_size = code_cache->ReserveData(self,
                                                  stack_map.size(),
-                                                 /* number_of_roots */ 0,
+                                                 /* number_of_roots= */ 0,
                                                  method,
                                                  &stack_map_data,
                                                  &roots_data);
@@ -1297,7 +1297,7 @@
         data_size,
         osr,
         roots,
-        /* has_should_deoptimize_flag */ false,
+        /* has_should_deoptimize_flag= */ false,
         cha_single_implementation_list);
     if (code == nullptr) {
       return false;
@@ -1456,8 +1456,8 @@
   return true;
 }
 
-void OptimizingCompiler::GenerateJitDebugInfo(
-    ArtMethod* method, const debug::MethodDebugInfo& info) {
+void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method ATTRIBUTE_UNUSED,
+                                              const debug::MethodDebugInfo& info) {
   const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
   DCHECK(compiler_options.GenerateAnyDebugInfo());
 
@@ -1472,12 +1472,10 @@
       info);
   AddNativeDebugInfoForJit(Thread::Current(),
                            reinterpret_cast<const void*>(info.code_address),
-                           elf_file);
-
-  VLOG(jit)
-      << "JIT mini-debug-info added for " << ArtMethod::PrettyMethod(method)
-      << " size=" << PrettySize(elf_file.size())
-      << " total_size=" << PrettySize(GetJitMiniDebugInfoMemUsage());
+                           elf_file,
+                           debug::PackElfFileForJIT,
+                           compiler_options.GetInstructionSet(),
+                           compiler_options.GetInstructionSetFeatures());
 }
 
 }  // namespace art
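
In GenerateJitDebugInfo above, replacing the VLOG statement leaves `method` unread, so the parameter is kept in the signature and marked ATTRIBUTE_UNUSED rather than dropped. A short sketch of the idiom using the portable C++17 spelling; the struct and values are hypothetical, and the assumption is that ART's ATTRIBUTE_UNUSED macro plays the same role as `[[maybe_unused]]`:

    #include <cstdio>

    struct MethodDebugInfo { const char* name; };

    // Portable C++17 equivalent of an ATTRIBUTE_UNUSED annotation: the parameter
    // stays in the signature (declaration and call sites untouched), but
    // -Wunused-parameter no longer fires once the body stops reading it.
    void GenerateJitDebugInfo([[maybe_unused]] int method_id,
                              const MethodDebugInfo& info) {
      std::printf("emitting debug info for %s\n", info.name);
    }

    int main() {
      GenerateJitDebugInfo(42, MethodDebugInfo{"LFoo;->bar()V"});
      return 0;
    }
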
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index f903f82..4e376b1 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -165,13 +165,13 @@
       const DexCompilationUnit* dex_compilation_unit =
           new (graph->GetAllocator()) DexCompilationUnit(
               handles_->NewHandle<mirror::ClassLoader>(nullptr),
-              /* class_linker */ nullptr,
+              /* class_linker= */ nullptr,
               graph->GetDexFile(),
               code_item,
-              /* class_def_index */ DexFile::kDexNoIndex16,
-              /* method_idx */ dex::kDexNoIndex,
-              /* access_flags */ 0u,
-              /* verified_method */ nullptr,
+              /* class_def_index= */ DexFile::kDexNoIndex16,
+              /* method_idx= */ dex::kDexNoIndex,
+              /* access_flags= */ 0u,
+              /* verified_method= */ nullptr,
               handles_->NewHandle<mirror::DexCache>(nullptr));
       CodeItemDebugInfoAccessor accessor(graph->GetDexFile(), code_item, /*dex_method_idx*/ 0u);
       HGraphBuilder builder(graph, dex_compilation_unit, accessor, handles_.get(), return_type);
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index 12db8a0..fbdbf9d 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -87,9 +87,9 @@
     if (GetGraph()->GetArtMethod() != char_at_method) {
       ArenaAllocator* allocator = GetGraph()->GetAllocator();
       HEnvironment* environment = new (allocator) HEnvironment(allocator,
-                                                               /* number_of_vregs */ 0u,
+                                                               /* number_of_vregs= */ 0u,
                                                                char_at_method,
-                                                               /* dex_pc */ dex::kDexNoIndex,
+                                                               /* dex_pc= */ dex::kDexNoIndex,
                                                                check);
       check->InsertRawEnvironment(environment);
     }
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 9079658..61e7a60 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -278,7 +278,7 @@
       if (ShouldCreateBoundType(
             insert_point, receiver, class_rti, start_instruction, start_block)) {
         bound_type = new (receiver->GetBlock()->GetGraph()->GetAllocator()) HBoundType(receiver);
-        bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false);
+        bound_type->SetUpperBound(class_rti, /* can_be_null= */ false);
         start_block->InsertInstructionBefore(bound_type, insert_point);
         // To comply with the RTP algorithm, don't type the bound type just yet, it will
         // be handled in RTPVisitor::VisitBoundType.
@@ -350,7 +350,7 @@
     HBasicBlock* trueBlock = compare->IsEqual()
         ? check->AsIf()->IfTrueSuccessor()
         : check->AsIf()->IfFalseSuccessor();
-    BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti);
+    BoundTypeIn(receiver, trueBlock, /* start_instruction= */ nullptr, class_rti);
   } else {
     DCHECK(check->IsDeoptimize());
     if (compare->IsEqual() && check->AsDeoptimize()->GuardsAnInput()) {
@@ -427,9 +427,9 @@
       : ifInstruction->IfFalseSuccessor();
 
   ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
-      handle_cache_->GetObjectClassHandle(), /* is_exact */ false);
+      handle_cache_->GetObjectClassHandle(), /* is_exact= */ false);
 
-  BoundTypeIn(obj, notNullBlock, /* start_instruction */ nullptr, object_rti);
+  BoundTypeIn(obj, notNullBlock, /* start_instruction= */ nullptr, object_rti);
 }
 
 // Returns true if one of the patterns below has been recognized. If so, the
@@ -538,10 +538,10 @@
   {
     ScopedObjectAccess soa(Thread::Current());
     if (!class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
-      class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false);
+      class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact= */ false);
     }
   }
-  BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction */ nullptr, class_rti);
+  BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction= */ nullptr, class_rti);
 }
 
 void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
@@ -561,7 +561,7 @@
       // Use a null loader, the target method is in a boot classpath dex file.
       Handle<mirror::ClassLoader> loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
       ArtMethod* method = cl->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
-          dex_method_index, dex_cache, loader, /* referrer */ nullptr, kDirect);
+          dex_method_index, dex_cache, loader, /* referrer= */ nullptr, kDirect);
       DCHECK(method != nullptr);
       ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
       DCHECK(declaring_class != nullptr);
@@ -571,7 +571,7 @@
           << "Expected String.<init>: " << method->PrettyMethod();
     }
     instr->SetReferenceTypeInfo(
-        ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true));
+        ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact= */ true));
   } else if (IsAdmissible(klass)) {
     ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass);
     is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes();
@@ -600,12 +600,12 @@
 
 void ReferenceTypePropagation::RTPVisitor::VisitNewInstance(HNewInstance* instr) {
   ScopedObjectAccess soa(Thread::Current());
-  SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
+  SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact= */ true);
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitNewArray(HNewArray* instr) {
   ScopedObjectAccess soa(Thread::Current());
-  SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
+  SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact= */ true);
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue* instr) {
@@ -614,7 +614,7 @@
     UpdateReferenceTypeInfo(instr,
                             instr->GetTypeIndex(),
                             instr->GetDexFile(),
-                            /* is_exact */ false);
+                            /* is_exact= */ false);
   }
 }
 
@@ -632,7 +632,7 @@
     klass = info.GetField()->LookupResolvedType();
   }
 
-  SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+  SetClassAsTypeInfo(instr, klass, /* is_exact= */ false);
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitInstanceFieldGet(HInstanceFieldGet* instr) {
@@ -665,7 +665,7 @@
     instr->SetValidLoadedClassRTI();
   }
   instr->SetReferenceTypeInfo(
-      ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact */ true));
+      ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact= */ true));
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitInstanceOf(HInstanceOf* instr) {
@@ -682,17 +682,17 @@
 void ReferenceTypePropagation::RTPVisitor::VisitLoadMethodHandle(HLoadMethodHandle* instr) {
   instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
       handle_cache_->GetMethodHandleClassHandle(),
-      /* is_exact */ true));
+      /* is_exact= */ true));
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitLoadMethodType(HLoadMethodType* instr) {
   instr->SetReferenceTypeInfo(
-      ReferenceTypeInfo::Create(handle_cache_->GetMethodTypeClassHandle(), /* is_exact */ true));
+      ReferenceTypeInfo::Create(handle_cache_->GetMethodTypeClassHandle(), /* is_exact= */ true));
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitLoadString(HLoadString* instr) {
   instr->SetReferenceTypeInfo(
-      ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true));
+      ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact= */ true));
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitLoadException(HLoadException* instr) {
@@ -701,12 +701,12 @@
 
   if (catch_info->IsCatchAllTypeIndex()) {
     instr->SetReferenceTypeInfo(
-        ReferenceTypeInfo::Create(handle_cache_->GetThrowableClassHandle(), /* is_exact */ false));
+        ReferenceTypeInfo::Create(handle_cache_->GetThrowableClassHandle(), /* is_exact= */ false));
   } else {
     UpdateReferenceTypeInfo(instr,
                             catch_info->GetCatchTypeIndex(),
                             catch_info->GetCatchDexFile(),
-                            /* is_exact */ false);
+                            /* is_exact= */ false);
   }
 }
 
@@ -736,7 +736,7 @@
         // bound type is dead. To not confuse potential other optimizations, we mark
         // the bound as non-exact.
         instr->SetReferenceTypeInfo(
-            ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
+            ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact= */ false));
       }
     } else {
       // Object not typed yet. Leave BoundType untyped for now rather than
@@ -914,7 +914,7 @@
   ScopedObjectAccess soa(Thread::Current());
   ArtMethod* method = instr->GetResolvedMethod();
   ObjPtr<mirror::Class> klass = (method == nullptr) ? nullptr : method->LookupResolvedReturnType();
-  SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+  SetClassAsTypeInfo(instr, klass, /* is_exact= */ false);
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitArrayGet(HArrayGet* instr) {
@@ -947,7 +947,7 @@
     // bound type is dead. To not confuse potential other optimizations, we mark
     // the bound as non-exact.
     instr->SetReferenceTypeInfo(
-        ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), /* is_exact */ false));
+        ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), /* is_exact= */ false));
   }
 }
 
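The `is_exact` call sites above are the motivating case for argument comments: a bare `true` or `false` says nothing by itself. A different design some codebases use instead is a two-value enum class, sketched below for comparison; it is not what this change adopts:

    #include <iostream>

    // A common alternative to `bool` + argument comment: a two-value enum class
    // makes the call site self-documenting without relying on tooling.
    enum class Exactness { kExact, kInexact };

    struct TypeInfo {
      bool is_exact;
      static TypeInfo Create(Exactness exactness) {
        return TypeInfo{exactness == Exactness::kExact};
      }
    };

    int main() {
      TypeInfo exact = TypeInfo::Create(Exactness::kExact);    // no comment needed
      TypeInfo bound = TypeInfo::Create(Exactness::kInexact);
      std::cout << exact.is_exact << " " << bound.is_exact << "\n";
      return 0;
    }
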
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 27f9ac3..b1f0a1a 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -280,16 +280,16 @@
     LocationSummary* locations = instruction->GetLocations();
     if (locations->OnlyCallsOnSlowPath()) {
       size_t core_spills =
-          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true);
+          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ true);
       size_t fp_spills =
-          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false);
+          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ false);
       size_t spill_size =
           core_register_spill_size * core_spills + fp_register_spill_size * fp_spills;
       maximum_safepoint_spill_size = std::max(maximum_safepoint_spill_size, spill_size);
     } else if (locations->CallsOnMainAndSlowPath()) {
       // Nothing to spill on the slow path if the main path already clobbers caller-saves.
-      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true));
-      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false));
+      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ true));
+      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ false));
     }
   }
   return maximum_safepoint_spill_size;
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 1e00003..0d6c5a3 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -252,7 +252,7 @@
           temp_intervals_.push_back(interval);
           interval->AddTempUse(instruction, i);
           if (codegen_->NeedsTwoRegisters(DataType::Type::kFloat64)) {
-            interval->AddHighInterval(/* is_temp */ true);
+            interval->AddHighInterval(/* is_temp= */ true);
             LiveInterval* high = interval->GetHighInterval();
             temp_intervals_.push_back(high);
             unhandled_fp_intervals_.push_back(high);
@@ -284,7 +284,7 @@
   }
 
   if (locations->WillCall()) {
-    BlockRegisters(position, position + 1, /* caller_save_only */ true);
+    BlockRegisters(position, position + 1, /* caller_save_only= */ true);
   }
 
   for (size_t i = 0; i < locations->GetInputCount(); ++i) {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index be5304c..79eb082 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -68,11 +68,11 @@
   bool ValidateIntervals(const ScopedArenaVector<LiveInterval*>& intervals,
                          const CodeGenerator& codegen) {
     return RegisterAllocator::ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
-                                                /* number_of_spill_slots */ 0u,
-                                                /* number_of_out_slots */ 0u,
+                                                /* number_of_spill_slots= */ 0u,
+                                                /* number_of_out_slots= */ 0u,
                                                 codegen,
-                                                /* processing_core_registers */ true,
-                                                /* log_fatal_on_failure */ false);
+                                                /* processing_core_registers= */ true,
+                                                /* log_fatal_on_failure= */ false);
   }
 };
 
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index df897a4..fdef45e 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -680,7 +680,7 @@
   DCHECK_NE(cursor, cursor->GetBlock()->GetLastInstruction());
   DCHECK(!instruction->IsControlFlow());
   DCHECK(!cursor->IsControlFlow());
-  instruction->MoveBefore(cursor->GetNext(), /* do_checks */ false);
+  instruction->MoveBefore(cursor->GetNext(), /* do_checks= */ false);
 }
 
 void HScheduler::Schedule(HInstruction* instruction) {
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index d89d117..858a555 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -563,7 +563,7 @@
     last_visited_internal_latency_ = kArmIntegerOpLatency;
     last_visited_latency_ = kArmIntegerOpLatency;
   } else {
-    HandleGenerateDataProcInstruction(/* internal_latency */ true);
+    HandleGenerateDataProcInstruction(/* internal_latency= */ true);
     HandleGenerateDataProcInstruction();
   }
 }
@@ -585,8 +585,8 @@
     DCHECK_LT(shift_value, 32U);
 
     if (kind == HInstruction::kOr || kind == HInstruction::kXor) {
-      HandleGenerateDataProcInstruction(/* internal_latency */ true);
-      HandleGenerateDataProcInstruction(/* internal_latency */ true);
+      HandleGenerateDataProcInstruction(/* internal_latency= */ true);
+      HandleGenerateDataProcInstruction(/* internal_latency= */ true);
       HandleGenerateDataProcInstruction();
     } else {
       last_visited_internal_latency_ += 2 * kArmIntegerOpLatency;
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 981fcc4..e0e265a 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -148,7 +148,7 @@
 
     SchedulingGraph scheduling_graph(scheduler,
                                      GetScopedAllocator(),
-                                     /* heap_location_collector */ nullptr);
+                                     /* heap_location_collector= */ nullptr);
     // Instructions must be inserted in reverse order into the scheduling graph.
     for (HInstruction* instr : ReverseRange(block_instructions)) {
       scheduling_graph.AddNode(instr);
diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc
index 4b0be07..cf26e79 100644
--- a/compiler/optimizing/side_effects_test.cc
+++ b/compiler/optimizing/side_effects_test.cc
@@ -141,13 +141,13 @@
 
 TEST(SideEffectsTest, VolatileDependences) {
   SideEffects volatile_write =
-      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ true);
+      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ true);
   SideEffects any_write =
-      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ false);
+      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ false);
   SideEffects volatile_read =
-      SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile */ true);
+      SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile= */ true);
   SideEffects any_read =
-      SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile */ false);
+      SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile= */ false);
 
   EXPECT_FALSE(volatile_write.MayDependOn(any_read));
   EXPECT_TRUE(any_read.MayDependOn(volatile_write));
@@ -163,15 +163,15 @@
 TEST(SideEffectsTest, SameWidthTypesNoAlias) {
   // Type I/F.
   testNoWriteAndReadDependence(
-      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ false),
-      SideEffects::FieldReadOfType(DataType::Type::kFloat32, /* is_volatile */ false));
+      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ false),
+      SideEffects::FieldReadOfType(DataType::Type::kFloat32, /* is_volatile= */ false));
   testNoWriteAndReadDependence(
       SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
       SideEffects::ArrayReadOfType(DataType::Type::kFloat32));
   // Type L/D.
   testNoWriteAndReadDependence(
-      SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile */ false),
-      SideEffects::FieldReadOfType(DataType::Type::kFloat64, /* is_volatile */ false));
+      SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile= */ false),
+      SideEffects::FieldReadOfType(DataType::Type::kFloat64, /* is_volatile= */ false));
   testNoWriteAndReadDependence(
       SideEffects::ArrayWriteOfType(DataType::Type::kInt64),
       SideEffects::ArrayReadOfType(DataType::Type::kFloat64));
@@ -181,9 +181,9 @@
   SideEffects s = SideEffects::None();
   // Keep taking the union of different writes and reads.
   for (DataType::Type type : kTestTypes) {
-    s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile */ false));
+    s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile= */ false));
     s = s.Union(SideEffects::ArrayWriteOfType(type));
-    s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile */ false));
+    s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile= */ false));
     s = s.Union(SideEffects::ArrayReadOfType(type));
   }
   EXPECT_TRUE(s.DoesAllReadWrite());
@@ -254,10 +254,10 @@
       "||I|||||",
       SideEffects::ArrayReadOfType(DataType::Type::kInt32).ToString().c_str());
   SideEffects s = SideEffects::None();
-  s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kUint16, /* is_volatile */ false));
-  s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile */ false));
+  s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kUint16, /* is_volatile= */ false));
+  s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile= */ false));
   s = s.Union(SideEffects::ArrayWriteOfType(DataType::Type::kInt16));
-  s = s.Union(SideEffects::FieldReadOfType(DataType::Type::kInt32, /* is_volatile */ false));
+  s = s.Union(SideEffects::FieldReadOfType(DataType::Type::kInt32, /* is_volatile= */ false));
   s = s.Union(SideEffects::ArrayReadOfType(DataType::Type::kFloat32));
   s = s.Union(SideEffects::ArrayReadOfType(DataType::Type::kFloat64));
   EXPECT_STREQ("||DF|I||S|JC|", s.ToString().c_str());
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index cef234a..0d0e1ec 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -391,7 +391,7 @@
           // succeed in code validated by the verifier.
           HInstruction* equivalent = GetFloatOrDoubleEquivalent(value, array_type);
           DCHECK(equivalent != nullptr);
-          aset->ReplaceInput(equivalent, /* input_index */ 2);
+          aset->ReplaceInput(equivalent, /* index= */ 2);
           if (equivalent->IsPhi()) {
             // Returned equivalent is a phi which may not have had its inputs
             // replaced yet. We need to run primitive type propagation on it.
@@ -525,7 +525,7 @@
                            class_loader_,
                            dex_cache_,
                            handles_,
-                           /* is_first_run */ true).Run();
+                           /* is_first_run= */ true).Run();
 
   // HInstructionBuilder duplicated ArrayGet instructions with ambiguous type
   // (int/float or long/double) and marked ArraySets with ambiguous input type.
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 62a70d6..7b2c3a9 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -120,7 +120,7 @@
       DCHECK(input->HasSsaIndex());
       // `input` generates a result used by `current`. Add use and update
       // the live-in set.
-      input->GetLiveInterval()->AddUse(current, /* environment */ nullptr, i, actual_user);
+      input->GetLiveInterval()->AddUse(current, /* environment= */ nullptr, i, actual_user);
       live_in->SetBit(input->GetSsaIndex());
     } else if (has_out_location) {
       // `input` generates a result but it is not used by `current`.
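
The `AddUse` change above annotates a `nullptr` argument, which is where these comments pay off most: a bare null pointer is indistinguishable across several pointer parameters. A small sketch with a hypothetical signature loosely shaped like the call above:

    #include <cstdio>

    struct Env {};
    struct User {};

    // Hypothetical AddUse-like signature: pointer and index parameters in a row.
    void AddUse(int position, const Env* environment, int input_index,
                const User* actual_user) {
      std::printf("use at %d (env=%p, input=%d, user=%p)\n", position,
                  static_cast<const void*>(environment), input_index,
                  static_cast<const void*>(actual_user));
    }

    int main() {
      // Without the comments, the two nullptr arguments read identically;
      // with them, the call reads like keyword arguments.
      AddUse(7, /* environment= */ nullptr, 0, /* actual_user= */ nullptr);
      return 0;
    }
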
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 4b52553..352c44f 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -94,25 +94,25 @@
   HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
   block->AddInstruction(null_check);
   HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
-                                                                   /* number_of_vregs */ 5,
-                                                                   /* method */ nullptr,
-                                                                   /* dex_pc */ 0u,
+                                                                   /* number_of_vregs= */ 5,
+                                                                   /* method= */ nullptr,
+                                                                   /* dex_pc= */ 0u,
                                                                    null_check);
   null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   null_check->SetRawEnvironment(null_check_env);
   HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
   block->AddInstruction(length);
-  HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc */ 0u);
+  HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc= */ 0u);
   block->AddInstruction(bounds_check);
   HEnvironment* bounds_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
-                                                                     /* number_of_vregs */ 5,
-                                                                     /* method */ nullptr,
-                                                                     /* dex_pc */ 0u,
+                                                                     /* number_of_vregs= */ 5,
+                                                                     /* method= */ nullptr,
+                                                                     /* dex_pc= */ 0u,
                                                                      bounds_check);
   bounds_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   bounds_check->SetRawEnvironment(bounds_check_env);
   HInstruction* array_set =
-      new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+      new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc= */ 0);
   block->AddInstruction(array_set);
 
   graph_->BuildDominatorTree();
@@ -163,9 +163,9 @@
   HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
   block->AddInstruction(null_check);
   HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
-                                                                   /* number_of_vregs */ 5,
-                                                                   /* method */ nullptr,
-                                                                   /* dex_pc */ 0u,
+                                                                   /* number_of_vregs= */ 5,
+                                                                   /* method= */ nullptr,
+                                                                   /* dex_pc= */ 0u,
                                                                    null_check);
   null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   null_check->SetRawEnvironment(null_check_env);
@@ -175,17 +175,17 @@
   HInstruction* ae = new (GetAllocator()) HAboveOrEqual(index, length);
   block->AddInstruction(ae);
   HInstruction* deoptimize = new(GetAllocator()) HDeoptimize(
-      GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u);
+      GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc= */ 0u);
   block->AddInstruction(deoptimize);
   HEnvironment* deoptimize_env = new (GetAllocator()) HEnvironment(GetAllocator(),
-                                                                   /* number_of_vregs */ 5,
-                                                                   /* method */ nullptr,
-                                                                   /* dex_pc */ 0u,
+                                                                   /* number_of_vregs= */ 5,
+                                                                   /* method= */ nullptr,
+                                                                   /* dex_pc= */ 0u,
                                                                    deoptimize);
   deoptimize_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   deoptimize->SetRawEnvironment(deoptimize_env);
   HInstruction* array_set =
-      new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+      new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc= */ 0);
   block->AddInstruction(array_set);
 
   graph_->BuildDominatorTree();
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 5370f43..3fcb72e 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -141,7 +141,7 @@
 
   ArenaBitVector visited_phis_in_cycle(&allocator,
                                        graph_->GetCurrentInstructionId(),
-                                       /* expandable */ false,
+                                       /* expandable= */ false,
                                        kArenaAllocSsaPhiElimination);
   visited_phis_in_cycle.ClearAllBits();
   ScopedArenaVector<HPhi*> cycle_worklist(allocator.Adapter(kArenaAllocSsaPhiElimination));
diff --git a/compiler/optimizing/superblock_cloner.h b/compiler/optimizing/superblock_cloner.h
index f211721..dbe9008 100644
--- a/compiler/optimizing/superblock_cloner.h
+++ b/compiler/optimizing/superblock_cloner.h
@@ -372,8 +372,8 @@
   // Returns whether the loop can be peeled/unrolled.
   bool IsLoopClonable() const { return cloner_.IsSubgraphClonable(); }
 
-  HBasicBlock* DoPeeling() { return DoPeelUnrollImpl(/* to_unroll */ false); }
-  HBasicBlock* DoUnrolling() { return DoPeelUnrollImpl(/* to_unroll */ true); }
+  HBasicBlock* DoPeeling() { return DoPeelUnrollImpl(/* to_unroll= */ false); }
+  HBasicBlock* DoUnrolling() { return DoPeelUnrollImpl(/* to_unroll= */ true); }
   HLoopInformation* GetRegionToBeAdjusted() const { return cloner_.GetRegionToBeAdjusted(); }
 
  protected:
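
`DoPeeling()` and `DoUnrolling()` above show the complementary technique: the bool is confined to one private implementation method and public callers get named wrappers, so only a single annotated call site exists per flag value. A minimal sketch of that shape (class name and output are hypothetical):

    #include <cstdio>

    class LoopTransformer {
     public:
      // Named entry points: call sites read as intent, no comment needed there.
      void DoPeeling() { DoPeelUnrollImpl(/* to_unroll= */ false); }
      void DoUnrolling() { DoPeelUnrollImpl(/* to_unroll= */ true); }

     private:
      // The flag survives only here, where both paths share their logic.
      void DoPeelUnrollImpl(bool to_unroll) {
        std::printf("%s", to_unroll ? "unrolling loop body\n"
                                    : "peeling one iteration\n");
      }
    };

    int main() {
      LoopTransformer t;
      t.DoPeeling();
      t.DoUnrolling();
      return 0;
    }
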
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index ebb631e..77f5d70 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -91,7 +91,7 @@
   ___ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value()));
   // Check that mr == self.tls32_.is.gc_marking.
   ___ Cmp(mr, temp);
-  ___ B(eq, &mr_is_ok, /* far_target */ false);
+  ___ B(eq, &mr_is_ok, /* is_far_target= */ false);
   ___ Bkpt(code);
   ___ Bind(&mr_is_ok);
 }
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 096410d..0537225 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -295,7 +295,7 @@
   void ImplicitlyAdvancePC() final;
 
   explicit DebugFrameOpCodeWriterForAssembler(Assembler* buffer)
-      : dwarf::DebugFrameOpCodeWriter<>(false /* enabled */),
+      : dwarf::DebugFrameOpCodeWriter<>(/* enabled= */ false),
         assembler_(buffer),
         delay_emitting_advance_pc_(false),
         delayed_advance_pcs_() {
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 3d26296..c9ece1d 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -239,7 +239,7 @@
   __ Load(scratch_register, FrameOffset(4092), 4);
   __ Load(scratch_register, FrameOffset(4096), 4);
   __ LoadRawPtrFromThread(scratch_register, ThreadOffset32(512));
-  __ LoadRef(method_register, scratch_register, MemberOffset(128), /* unpoison_reference */ false);
+  __ LoadRef(method_register, scratch_register, MemberOffset(128), /* unpoison_reference= */ false);
 
   // Stores
   __ Store(FrameOffset(32), method_register, 4);
@@ -284,7 +284,7 @@
 
   __ DecreaseFrameSize(4096);
   __ DecreaseFrameSize(32);
-  __ RemoveFrame(frame_size, callee_save_regs, /* may_suspend */ true);
+  __ RemoveFrame(frame_size, callee_save_regs, /* may_suspend= */ true);
 
   EmitAndCheck(&assembler, "VixlJniHelpers");
 }
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index a673e32..a9d1a25 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -463,7 +463,7 @@
 }
 
 void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) {
-  Addiu(rt, rs, imm16, /* patcher_label */ nullptr);
+  Addiu(rt, rs, imm16, /* patcher_label= */ nullptr);
 }
 
 void MipsAssembler::Subu(Register rd, Register rs, Register rt) {
@@ -732,7 +732,7 @@
 }
 
 void MipsAssembler::Lw(Register rt, Register rs, uint16_t imm16) {
-  Lw(rt, rs, imm16, /* patcher_label */ nullptr);
+  Lw(rt, rs, imm16, /* patcher_label= */ nullptr);
 }
 
 void MipsAssembler::Lwl(Register rt, Register rs, uint16_t imm16) {
@@ -814,7 +814,7 @@
 }
 
 void MipsAssembler::Sw(Register rt, Register rs, uint16_t imm16) {
-  Sw(rt, rs, imm16, /* patcher_label */ nullptr);
+  Sw(rt, rs, imm16, /* patcher_label= */ nullptr);
 }
 
 void MipsAssembler::Swl(Register rt, Register rs, uint16_t imm16) {
@@ -3755,7 +3755,7 @@
 
 void MipsAssembler::Buncond(MipsLabel* label, bool is_r6, bool is_bare) {
   uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
-  branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call */ false, is_bare);
+  branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call= */ false, is_bare);
   MoveInstructionToDelaySlot(branches_.back());
   FinalizeLabeledBranch(label);
 }
@@ -3778,7 +3778,7 @@
 
 void MipsAssembler::Call(MipsLabel* label, bool is_r6, bool is_bare) {
   uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
-  branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call */ true, is_bare);
+  branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call= */ true, is_bare);
   MoveInstructionToDelaySlot(branches_.back());
   FinalizeLabeledBranch(label);
 }
@@ -4300,43 +4300,43 @@
 }
 
 void MipsAssembler::B(MipsLabel* label, bool is_bare) {
-  Buncond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare);
+  Buncond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare);
 }
 
 void MipsAssembler::Bal(MipsLabel* label, bool is_bare) {
-  Call(label, /* is_r6 */ (IsR6() && !is_bare), is_bare);
+  Call(label, /* is_r6= */ (IsR6() && !is_bare), is_bare);
 }
 
 void MipsAssembler::Beq(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondEQ, rs, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondEQ, rs, rt);
 }
 
 void MipsAssembler::Bne(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondNE, rs, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondNE, rs, rt);
 }
 
 void MipsAssembler::Beqz(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondEQZ, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondEQZ, rt);
 }
 
 void MipsAssembler::Bnez(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondNEZ, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondNEZ, rt);
 }
 
 void MipsAssembler::Bltz(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondLTZ, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondLTZ, rt);
 }
 
 void MipsAssembler::Bgez(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondGEZ, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondGEZ, rt);
 }
 
 void MipsAssembler::Blez(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondLEZ, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondLEZ, rt);
 }
 
 void MipsAssembler::Bgtz(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondGTZ, rt);
+  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondGTZ, rt);
 }
 
 bool MipsAssembler::CanExchangeWithSlt(Register rs, Register rt) const {
@@ -4392,7 +4392,7 @@
     Bcond(label, IsR6(), is_bare, kCondLT, rs, rt);
   } else if (!Branch::IsNop(kCondLT, rs, rt)) {
     // Synthesize the instruction (not available on R2).
-    GenerateSltForCondBranch(/* unsigned_slt */ false, rs, rt);
+    GenerateSltForCondBranch(/* unsigned_slt= */ false, rs, rt);
     Bnez(AT, label, is_bare);
   }
 }
@@ -4404,7 +4404,7 @@
     B(label, is_bare);
   } else {
     // Synthesize the instruction (not available on R2).
-    GenerateSltForCondBranch(/* unsigned_slt */ false, rs, rt);
+    GenerateSltForCondBranch(/* unsigned_slt= */ false, rs, rt);
     Beqz(AT, label, is_bare);
   }
 }
@@ -4414,7 +4414,7 @@
     Bcond(label, IsR6(), is_bare, kCondLTU, rs, rt);
   } else if (!Branch::IsNop(kCondLTU, rs, rt)) {
     // Synthesize the instruction (not available on R2).
-    GenerateSltForCondBranch(/* unsigned_slt */ true, rs, rt);
+    GenerateSltForCondBranch(/* unsigned_slt= */ true, rs, rt);
     Bnez(AT, label, is_bare);
   }
 }
@@ -4426,7 +4426,7 @@
     B(label, is_bare);
   } else {
     // Synthesize the instruction (not available on R2).
-    GenerateSltForCondBranch(/* unsigned_slt */ true, rs, rt);
+    GenerateSltForCondBranch(/* unsigned_slt= */ true, rs, rt);
     Beqz(AT, label, is_bare);
   }
 }
@@ -4437,7 +4437,7 @@
 
 void MipsAssembler::Bc1f(int cc, MipsLabel* label, bool is_bare) {
   CHECK(IsUint<3>(cc)) << cc;
-  Bcond(label, /* is_r6 */ false, is_bare, kCondF, static_cast<Register>(cc), ZERO);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondF, static_cast<Register>(cc), ZERO);
 }
 
 void MipsAssembler::Bc1t(MipsLabel* label, bool is_bare) {
@@ -4446,71 +4446,71 @@
 
 void MipsAssembler::Bc1t(int cc, MipsLabel* label, bool is_bare) {
   CHECK(IsUint<3>(cc)) << cc;
-  Bcond(label, /* is_r6 */ false, is_bare, kCondT, static_cast<Register>(cc), ZERO);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondT, static_cast<Register>(cc), ZERO);
 }
 
 void MipsAssembler::Bc(MipsLabel* label, bool is_bare) {
-  Buncond(label, /* is_r6 */ true, is_bare);
+  Buncond(label, /* is_r6= */ true, is_bare);
 }
 
 void MipsAssembler::Balc(MipsLabel* label, bool is_bare) {
-  Call(label, /* is_r6 */ true, is_bare);
+  Call(label, /* is_r6= */ true, is_bare);
 }
 
 void MipsAssembler::Beqc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondEQ, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondEQ, rs, rt);
 }
 
 void MipsAssembler::Bnec(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondNE, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondNE, rs, rt);
 }
 
 void MipsAssembler::Beqzc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondEQZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondEQZ, rt);
 }
 
 void MipsAssembler::Bnezc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondNEZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondNEZ, rt);
 }
 
 void MipsAssembler::Bltzc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLTZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLTZ, rt);
 }
 
 void MipsAssembler::Bgezc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGEZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGEZ, rt);
 }
 
 void MipsAssembler::Blezc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLEZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLEZ, rt);
 }
 
 void MipsAssembler::Bgtzc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGTZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGTZ, rt);
 }
 
 void MipsAssembler::Bltc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLT, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLT, rs, rt);
 }
 
 void MipsAssembler::Bgec(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGE, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGE, rs, rt);
 }
 
 void MipsAssembler::Bltuc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLTU, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLTU, rs, rt);
 }
 
 void MipsAssembler::Bgeuc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGEU, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGEU, rs, rt);
 }
 
 void MipsAssembler::Bc1eqz(FRegister ft, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondF, static_cast<Register>(ft), ZERO);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondF, static_cast<Register>(ft), ZERO);
 }
 
 void MipsAssembler::Bc1nez(FRegister ft, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondT, static_cast<Register>(ft), ZERO);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondT, static_cast<Register>(ft), ZERO);
 }
 
 void MipsAssembler::AdjustBaseAndOffset(Register& base,
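
The branch helpers above also annotate computed arguments, e.g. `/* is_r6= */ (IsR6() && !is_bare)`, not just literals. A small sketch of the readability win; whether a given clang-tidy configuration also verifies comments on non-literal arguments is left here as an assumption:

    #include <cstdio>

    bool IsR6() { return true; }  // stand-in for the assembler's ISA query

    void Buncond(bool is_r6, bool is_bare) {
      std::printf("branch: r6=%d bare=%d\n", is_r6, is_bare);
    }

    void B(bool is_bare) {
      // The '=' comment names the parameter even though the argument is an
      // expression, so a reader need not open the header to pair the two bools.
      Buncond(/* is_r6= */ (IsR6() && !is_bare), is_bare);
    }

    int main() {
      B(/* is_bare= */ false);
      B(/* is_bare= */ true);
      return 0;
    }
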
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 8a1e1df..69189a4 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -862,7 +862,7 @@
     // We permit `base` and `temp` to coincide (however, we check that neither is AT),
     // in which case the `base` register may be overwritten in the process.
     CHECK_NE(temp, AT);  // Must not use AT as temp, so as not to overwrite the adjusted base.
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
     uint32_t low = Low32Bits(value);
     uint32_t high = High32Bits(value);
     Register reg;
@@ -917,7 +917,7 @@
                       Register base,
                       int32_t offset,
                       ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
     switch (type) {
       case kLoadSignedByte:
         Lb(reg, base, offset);
@@ -960,7 +960,7 @@
                        Register base,
                        int32_t offset,
                        ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ false, /* is_float= */ true);
     Lwc1(reg, base, offset);
     null_checker();
   }
@@ -970,7 +970,7 @@
                        Register base,
                        int32_t offset,
                        ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ true, /* is_float= */ true);
     if (IsAligned<kMipsDoublewordSize>(offset)) {
       Ldc1(reg, base, offset);
       null_checker();
@@ -1016,7 +1016,7 @@
     // Must not use AT as `reg`, so as not to overwrite the value being stored
     // with the adjusted `base`.
     CHECK_NE(reg, AT);
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
     switch (type) {
       case kStoreByte:
         Sb(reg, base, offset);
@@ -1047,7 +1047,7 @@
                       Register base,
                       int32_t offset,
                       ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ false, /* is_float= */ true);
     Swc1(reg, base, offset);
     null_checker();
   }
@@ -1057,7 +1057,7 @@
                       Register base,
                       int32_t offset,
                       ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ true, /* is_float= */ true);
     if (IsAligned<kMipsDoublewordSize>(offset)) {
       Sdc1(reg, base, offset);
       null_checker();
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 723c489..4e27bbf 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -1078,11 +1078,11 @@
 //////////////
 
 TEST_F(AssemblerMIPS32r6Test, Bc) {
-  BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot */ false);
+  BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot= */ false);
 }
 
 TEST_F(AssemblerMIPS32r6Test, Balc) {
-  BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot */ false);
+  BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot= */ false);
 }
 
 TEST_F(AssemblerMIPS32r6Test, Beqc) {
@@ -1142,11 +1142,11 @@
 }
 
 TEST_F(AssemblerMIPS32r6Test, B) {
-  BranchHelper(&mips::MipsAssembler::B, "Bc", /* has_slot */ false);
+  BranchHelper(&mips::MipsAssembler::B, "Bc", /* has_slot= */ false);
 }
 
 TEST_F(AssemblerMIPS32r6Test, Bal) {
-  BranchHelper(&mips::MipsAssembler::Bal, "Balc", /* has_slot */ false);
+  BranchHelper(&mips::MipsAssembler::Bal, "Balc", /* has_slot= */ false);
 }
 
 TEST_F(AssemblerMIPS32r6Test, Beq) {
@@ -1198,123 +1198,123 @@
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBc) {
-  BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot */ false, /* is_bare */ true);
+  BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot= */ false, /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBalc) {
-  BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot */ false, /* is_bare */ true);
+  BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot= */ false, /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBeqc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beqc, "Beqc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beqc, "Beqc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBnec) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bnec, "Bnec", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bnec, "Bnec", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBeqzc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Beqzc, "Beqzc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Beqzc, "Beqzc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBnezc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bnezc, "Bnezc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bnezc, "Bnezc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBltzc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bltzc, "Bltzc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bltzc, "Bltzc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBgezc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgezc, "Bgezc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bgezc, "Bgezc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBlezc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Blezc, "Blezc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Blezc, "Blezc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBgtzc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtzc, "Bgtzc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtzc, "Bgtzc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBltc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltc, "Bltc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltc, "Bltc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBgec) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgec, "Bgec", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgec, "Bgec", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBltuc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltuc, "Bltuc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltuc, "Bltuc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBgeuc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeuc, "Bgeuc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeuc, "Bgeuc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBc1eqz) {
-  BranchFpuCondHelper(&mips::MipsAssembler::Bc1eqz, "Bc1eqz", /* is_bare */ true);
+  BranchFpuCondHelper(&mips::MipsAssembler::Bc1eqz, "Bc1eqz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBc1nez) {
-  BranchFpuCondHelper(&mips::MipsAssembler::Bc1nez, "Bc1nez", /* is_bare */ true);
+  BranchFpuCondHelper(&mips::MipsAssembler::Bc1nez, "Bc1nez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareB) {
-  BranchHelper(&mips::MipsAssembler::B, "B", /* has_slot */ true, /* is_bare */ true);
+  BranchHelper(&mips::MipsAssembler::B, "B", /* has_slot= */ true, /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBal) {
-  BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* has_slot */ true, /* is_bare */ true);
+  BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* has_slot= */ true, /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBeq) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBne) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBeqz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBnez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBltz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBgez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBlez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBgtz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBlt) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBge) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBltu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, BareBgeu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS32r6Test, LongBeqc) {
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index 4f8ccee..c0894d3 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -2241,67 +2241,67 @@
 }
 
 TEST_F(AssemblerMIPSTest, BareB) {
-  BranchHelper(&mips::MipsAssembler::B, "B", /* is_bare */ true);
+  BranchHelper(&mips::MipsAssembler::B, "B", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBal) {
-  BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* is_bare */ true);
+  BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBeq) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBne) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBeqz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBnez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBltz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBgez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBlez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBgtz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBlt) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBge) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBltu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBgeu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBc1f) {
-  BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1f, "Bc1f", /* is_bare */ true);
+  BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1f, "Bc1f", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, BareBc1t) {
-  BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1t, "Bc1t", /* is_bare */ true);
+  BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1t, "Bc1t", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPSTest, ImpossibleReordering) {
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 29d2bed..70313ca 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -2455,7 +2455,7 @@
       condition_(kUncond) {
   InitializeType(
       (is_call ? (is_bare ? kBareCall : kCall) : (is_bare ? kBareCondBranch : kCondBranch)),
-      /* is_r6 */ true);
+      /* is_r6= */ true);
 }
 
 Mips64Assembler::Branch::Branch(bool is_r6,
@@ -2516,7 +2516,7 @@
       rhs_reg_(ZERO),
       condition_(kUncond) {
   CHECK_NE(dest_reg, ZERO);
-  InitializeType(label_or_literal_type, /* is_r6 */ true);
+  InitializeType(label_or_literal_type, /* is_r6= */ true);
 }
 
 Mips64Assembler::BranchCondition Mips64Assembler::Branch::OppositeCondition(
@@ -2896,7 +2896,7 @@
 
 void Mips64Assembler::Buncond(Mips64Label* label, bool is_bare) {
   uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
-  branches_.emplace_back(buffer_.Size(), target, /* is_call */ false, is_bare);
+  branches_.emplace_back(buffer_.Size(), target, /* is_call= */ false, is_bare);
   FinalizeLabeledBranch(label);
 }
 
@@ -2917,7 +2917,7 @@
 
 void Mips64Assembler::Call(Mips64Label* label, bool is_bare) {
   uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
-  branches_.emplace_back(buffer_.Size(), target, /* is_call */ true, is_bare);
+  branches_.emplace_back(buffer_.Size(), target, /* is_call= */ true, is_bare);
   FinalizeLabeledBranch(label);
 }
 
@@ -3278,99 +3278,99 @@
 }
 
 void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLT, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLT, rs, rt);
 }
 
 void Mips64Assembler::Bltzc(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLTZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLTZ, rt);
 }
 
 void Mips64Assembler::Bgtzc(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGTZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGTZ, rt);
 }
 
 void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGE, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGE, rs, rt);
 }
 
 void Mips64Assembler::Bgezc(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGEZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGEZ, rt);
 }
 
 void Mips64Assembler::Blezc(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLEZ, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLEZ, rt);
 }
 
 void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondLTU, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondLTU, rs, rt);
 }
 
 void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondGEU, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondGEU, rs, rt);
 }
 
 void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondEQ, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondEQ, rs, rt);
 }
 
 void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondNE, rs, rt);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondNE, rs, rt);
 }
 
 void Mips64Assembler::Beqzc(GpuRegister rs, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondEQZ, rs);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondEQZ, rs);
 }
 
 void Mips64Assembler::Bnezc(GpuRegister rs, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondNEZ, rs);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondNEZ, rs);
 }
 
 void Mips64Assembler::Bc1eqz(FpuRegister ft, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondF, static_cast<GpuRegister>(ft), ZERO);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondF, static_cast<GpuRegister>(ft), ZERO);
 }
 
 void Mips64Assembler::Bc1nez(FpuRegister ft, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6 */ true, is_bare, kCondT, static_cast<GpuRegister>(ft), ZERO);
+  Bcond(label, /* is_r6= */ true, is_bare, kCondT, static_cast<GpuRegister>(ft), ZERO);
 }
 
 void Mips64Assembler::Bltz(GpuRegister rt, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondLTZ, rt);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondLTZ, rt);
 }
 
 void Mips64Assembler::Bgtz(GpuRegister rt, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondGTZ, rt);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondGTZ, rt);
 }
 
 void Mips64Assembler::Bgez(GpuRegister rt, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondGEZ, rt);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondGEZ, rt);
 }
 
 void Mips64Assembler::Blez(GpuRegister rt, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondLEZ, rt);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondLEZ, rt);
 }
 
 void Mips64Assembler::Beq(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondEQ, rs, rt);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondEQ, rs, rt);
 }
 
 void Mips64Assembler::Bne(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondNE, rs, rt);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondNE, rs, rt);
 }
 
 void Mips64Assembler::Beqz(GpuRegister rs, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondEQZ, rs);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondEQZ, rs);
 }
 
 void Mips64Assembler::Bnez(GpuRegister rs, Mips64Label* label, bool is_bare) {
   CHECK(is_bare);
-  Bcond(label, /* is_r6 */ false, is_bare, kCondNEZ, rs);
+  Bcond(label, /* is_r6= */ false, is_bare, kCondNEZ, rs);
 }
 
 void Mips64Assembler::AdjustBaseAndOffset(GpuRegister& base,
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index ce447db..2f991e9 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -1058,7 +1058,7 @@
     // We permit `base` and `temp` to coincide (however, we check that neither is AT),
     // in which case the `base` register may be overwritten in the process.
     CHECK_NE(temp, AT);  // Must not use AT as temp, so as not to overwrite the adjusted base.
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
     GpuRegister reg;
     // If the adjustment left `base` unchanged and equal to `temp`, we can't use `temp`
     // to load and hold the value but we can use AT instead as AT hasn't been used yet.
@@ -1127,7 +1127,7 @@
                       GpuRegister base,
                       int32_t offset,
                       ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
 
     switch (type) {
       case kLoadSignedByte:
@@ -1178,7 +1178,7 @@
                          ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
     int element_size_shift = -1;
     if (type != kLoadQuadword) {
-      AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
+      AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
     } else {
       AdjustBaseOffsetAndElementSizeShift(base, offset, element_size_shift);
     }
@@ -1226,7 +1226,7 @@
     // Must not use AT as `reg`, so as not to overwrite the value being stored
     // with the adjusted `base`.
     CHECK_NE(reg, AT);
-    AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
 
     switch (type) {
       case kStoreByte:
@@ -1267,7 +1267,7 @@
                         ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
     int element_size_shift = -1;
     if (type != kStoreQuadword) {
-      AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+      AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
     } else {
       AdjustBaseOffsetAndElementSizeShift(base, offset, element_size_shift);
     }
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 66711c3..499e8f4 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -852,99 +852,99 @@
 }
 
 TEST_F(AssemblerMIPS64Test, BareBc) {
-  BranchHelper(&mips64::Mips64Assembler::Bc, "Bc", /* is_bare */ true);
+  BranchHelper(&mips64::Mips64Assembler::Bc, "Bc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBalc) {
-  BranchHelper(&mips64::Mips64Assembler::Balc, "Balc", /* is_bare */ true);
+  BranchHelper(&mips64::Mips64Assembler::Balc, "Balc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBeqzc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqzc, "Beqzc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqzc, "Beqzc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBnezc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnezc, "Bnezc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnezc, "Bnezc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBltzc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltzc, "Bltzc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltzc, "Bltzc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBgezc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgezc, "Bgezc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgezc, "Bgezc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBlezc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Blezc, "Blezc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Blezc, "Blezc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBgtzc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtzc, "Bgtzc", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtzc, "Bgtzc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBeqc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beqc, "Beqc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beqc, "Beqc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBnec) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bnec, "Bnec", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bnec, "Bnec", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBltc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltc, "Bltc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltc, "Bltc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBgec) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgec, "Bgec", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgec, "Bgec", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBltuc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltuc, "Bltuc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltuc, "Bltuc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBgeuc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgeuc, "Bgeuc", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgeuc, "Bgeuc", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBc1eqz) {
-  BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1eqz, "Bc1eqz", /* is_bare */ true);
+  BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1eqz, "Bc1eqz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBc1nez) {
-  BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1nez, "Bc1nez", /* is_bare */ true);
+  BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1nez, "Bc1nez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBeqz) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqz, "Beqz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqz, "Beqz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBnez) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnez, "Bnez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnez, "Bnez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBltz) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltz, "Bltz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltz, "Bltz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBgez) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgez, "Bgez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgez, "Bgez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBlez) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Blez, "Blez", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Blez, "Blez", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBgtz) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtz, "Bgtz", /* is_bare */ true);
+  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtz, "Bgtz", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBeq) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beq, "Beq", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beq, "Beq", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, BareBne) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bne, "Bne", /* is_bare */ true);
+  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bne, "Bne", /* is_bare= */ true);
 }
 
 TEST_F(AssemblerMIPS64Test, LongBeqc) {
@@ -1252,7 +1252,7 @@
   std::vector<mips64::GpuRegister*> reg1_registers = GetRegisters();
   std::vector<mips64::GpuRegister*> reg2_registers = GetRegisters();
   reg2_registers.erase(reg2_registers.begin());  // reg2 can't be ZERO, remove it.
-  std::vector<int64_t> imms = CreateImmediateValuesBits(/* imm_bits */ 16, /* as_uint */ true);
+  std::vector<int64_t> imms = CreateImmediateValuesBits(/* imm_bits= */ 16, /* as_uint= */ true);
   WarnOnCombinations(reg1_registers.size() * reg2_registers.size() * imms.size());
   std::ostringstream expected;
   for (mips64::GpuRegister* reg1 : reg1_registers) {
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 2d1e451..4b073bd 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -2151,7 +2151,7 @@
 void X86Assembler::cmpw(const Address& address, const Immediate& imm) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
-  EmitComplex(7, address, imm, /* is_16_op */ true);
+  EmitComplex(7, address, imm, /* is_16_op= */ true);
 }
 
 
@@ -2341,7 +2341,7 @@
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   CHECK(imm.is_uint16() || imm.is_int16()) << imm.value();
   EmitUint8(0x66);
-  EmitComplex(0, address, imm, /* is_16_op */ true);
+  EmitComplex(0, address, imm, /* is_16_op= */ true);
 }
 
 
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index ae68fe9..c118bc6 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2391,7 +2391,7 @@
   CHECK(imm.is_int32());
   EmitOperandSizeOverride();
   EmitOptionalRex32(address);
-  EmitComplex(7, address, imm, /* is_16_op */ true);
+  EmitComplex(7, address, imm, /* is_16_op= */ true);
 }
 
 
@@ -2805,7 +2805,7 @@
   CHECK(imm.is_uint16() || imm.is_int16()) << imm.value();
   EmitUint8(0x66);
   EmitOptionalRex32(address);
-  EmitComplex(0, address, imm, /* is_16_op */ true);
+  EmitComplex(0, address, imm, /* is_16_op= */ true);
 }
 
 
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 528e037..461f028 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -2094,7 +2094,7 @@
   ArrayRef<const ManagedRegister> spill_regs(raw_spill_regs);
 
   size_t frame_size = 10 * kStackAlignment;
-  assembler->RemoveFrame(frame_size, spill_regs, /* may_suspend */ true);
+  assembler->RemoveFrame(frame_size, spill_regs, /* may_suspend= */ true);
 
   // Construct assembly text counterpart.
   std::ostringstream str;
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 8c90aa7..c00f848 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -83,16 +83,16 @@
     compiler_driver_->InitializeThreadPools();
   }
 
-  void VerifyWithCompilerDriver(verifier::VerifierDeps* deps) {
+  void VerifyWithCompilerDriver(verifier::VerifierDeps* verifier_deps) {
     TimingLogger timings("Verify", false, false);
     // The compiler driver handles the verifier deps in the callbacks, so
     // remove what this class did for unit testing.
-    if (deps == nullptr) {
+    if (verifier_deps == nullptr) {
       // Create some verifier deps by default if they are not already specified.
-      deps = new verifier::VerifierDeps(dex_files_);
-      verifier_deps_.reset(deps);
+      verifier_deps = new verifier::VerifierDeps(dex_files_);
+      verifier_deps_.reset(verifier_deps);
     }
-    callbacks_->SetVerifierDeps(deps);
+    callbacks_->SetVerifierDeps(verifier_deps);
     compiler_driver_->Verify(class_loader_, dex_files_, &timings, verification_results_.get());
     callbacks_->SetVerifierDeps(nullptr);
     // Clear entries in the verification results to avoid hitting a DCHECK that
@@ -159,7 +159,7 @@
               method.GetIndex(),
               dex_cache_handle,
               class_loader_handle,
-              /* referrer */ nullptr,
+              /* referrer= */ nullptr,
               method.GetInvokeType(class_def->access_flags_));
       CHECK(resolved_method != nullptr);
       if (method_name == resolved_method->GetName()) {
@@ -173,12 +173,12 @@
                                 method.GetIndex(),
                                 resolved_method,
                                 method.GetAccessFlags(),
-                                true /* can_load_classes */,
-                                true /* allow_soft_failures */,
-                                true /* need_precise_constants */,
-                                false /* verify to dump */,
-                                true /* allow_thread_suspension */,
-                                0 /* api_level */);
+                                /* can_load_classes= */ true,
+                                /* allow_soft_failures= */ true,
+                                /* need_precise_constants= */ true,
+                                /* verify_to_dump= */ false,
+                                /* allow_thread_suspension= */ true,
+                                /* api_level= */ 0);
         verifier.Verify();
         soa.Self()->SetVerifierDeps(nullptr);
         has_failures = verifier.HasFailures();
@@ -195,7 +195,7 @@
       LoadDexFile(soa, "VerifierDeps", multidex);
     }
     SetupCompilerDriver();
-    VerifyWithCompilerDriver(/* verifier_deps */ nullptr);
+    VerifyWithCompilerDriver(/* verifier_deps= */ nullptr);
   }
 
   bool TestAssignabilityRecording(const std::string& dst,
@@ -372,12 +372,12 @@
   bool HasMethod(const std::string& expected_klass,
                  const std::string& expected_name,
                  const std::string& expected_signature,
-                 bool expected_resolved,
+                 bool expect_resolved,
                  const std::string& expected_access_flags = "",
                  const std::string& expected_decl_klass = "") {
     for (auto& dex_dep : verifier_deps_->dex_deps_) {
       for (const VerifierDeps::MethodResolution& entry : dex_dep.second->methods_) {
-        if (expected_resolved != entry.IsResolved()) {
+        if (expect_resolved != entry.IsResolved()) {
           continue;
         }
 
@@ -398,7 +398,7 @@
           continue;
         }
 
-        if (expected_resolved) {
+        if (expect_resolved) {
           // Test access flags. Note that PrettyJavaAccessFlags always appends
           // a space after the modifiers. Add it to the expected access flags.
           std::string actual_access_flags = PrettyJavaAccessFlags(entry.GetAccessFlags());
@@ -482,42 +482,42 @@
 }
 
 TEST_F(VerifierDepsTest, Assignable_BothInBoot) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/TimeZone;",
-                                         /* src */ "Ljava/util/SimpleTimeZone;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ true));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/util/TimeZone;",
+                                         /* src= */ "Ljava/util/SimpleTimeZone;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ true));
   ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
 }
 
 TEST_F(VerifierDepsTest, Assignable_DestinationInBoot1) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/net/Socket;",
-                                         /* src */ "LMySSLSocket;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ true));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/net/Socket;",
+                                         /* src= */ "LMySSLSocket;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ true));
   ASSERT_TRUE(HasAssignable("Ljava/net/Socket;", "Ljavax/net/ssl/SSLSocket;", true));
 }
 
 TEST_F(VerifierDepsTest, Assignable_DestinationInBoot2) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/TimeZone;",
-                                         /* src */ "LMySimpleTimeZone;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ true));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/util/TimeZone;",
+                                         /* src= */ "LMySimpleTimeZone;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ true));
   ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
 }
 
 TEST_F(VerifierDepsTest, Assignable_DestinationInBoot3) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/Collection;",
-                                         /* src */ "LMyThreadSet;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ true));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/util/Collection;",
+                                         /* src= */ "LMyThreadSet;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ true));
   ASSERT_TRUE(HasAssignable("Ljava/util/Collection;", "Ljava/util/Set;", true));
 }
 
 TEST_F(VerifierDepsTest, Assignable_BothArrays_Resolved) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[[Ljava/util/TimeZone;",
-                                         /* src */ "[[Ljava/util/SimpleTimeZone;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ true));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "[[Ljava/util/TimeZone;",
+                                         /* src= */ "[[Ljava/util/SimpleTimeZone;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ true));
   // If the component types of both arrays are resolved, we optimize the list of
   // dependencies by recording a dependency on the component types.
   ASSERT_FALSE(HasAssignable("[[Ljava/util/TimeZone;", "[[Ljava/util/SimpleTimeZone;", true));
@@ -526,34 +526,34 @@
 }
 
 TEST_F(VerifierDepsTest, NotAssignable_BothInBoot) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
-                                         /* src */ "Ljava/util/SimpleTimeZone;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ false));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+                                         /* src= */ "Ljava/util/SimpleTimeZone;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ false));
   ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false));
 }
 
 TEST_F(VerifierDepsTest, NotAssignable_DestinationInBoot1) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
-                                         /* src */ "LMySSLSocket;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ false));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+                                         /* src= */ "LMySSLSocket;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ false));
   ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljavax/net/ssl/SSLSocket;", false));
 }
 
 TEST_F(VerifierDepsTest, NotAssignable_DestinationInBoot2) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
-                                         /* src */ "LMySimpleTimeZone;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ false));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+                                         /* src= */ "LMySimpleTimeZone;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ false));
   ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false));
 }
 
 TEST_F(VerifierDepsTest, NotAssignable_BothArrays) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[Ljava/lang/Exception;",
-                                         /* src */ "[Ljava/util/SimpleTimeZone;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ false));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "[Ljava/lang/Exception;",
+                                         /* src= */ "[Ljava/util/SimpleTimeZone;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ false));
   ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false));
 }
 
@@ -589,7 +589,7 @@
   ASSERT_TRUE(HasMethod("Ljava/text/SimpleDateFormat;",
                         "setTimeZone",
                         "(Ljava/util/TimeZone;)V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/text/DateFormat;"));
   ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
@@ -824,7 +824,7 @@
   ASSERT_TRUE(HasMethod("Ljava/net/Socket;",
                         "setSocketImplFactory",
                         "(Ljava/net/SocketImplFactory;)V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public static",
                         "Ljava/net/Socket;"));
 }
@@ -835,7 +835,7 @@
   ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
                         "setSocketImplFactory",
                         "(Ljava/net/SocketImplFactory;)V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public static",
                         "Ljava/net/Socket;"));
 }
@@ -845,7 +845,7 @@
   ASSERT_TRUE(HasMethod("LMySSLSocket;",
                         "setSocketImplFactory",
                         "(Ljava/net/SocketImplFactory;)V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public static",
                         "Ljava/net/Socket;"));
 }
@@ -856,7 +856,7 @@
   ASSERT_TRUE(HasMethod("Ljava/util/Map$Entry;",
                         "comparingByKey",
                         "()Ljava/util/Comparator;",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public static",
                         "Ljava/util/Map$Entry;"));
 }
@@ -867,7 +867,7 @@
   ASSERT_TRUE(HasMethod("Ljava/util/AbstractMap$SimpleEntry;",
                         "comparingByKey",
                         "()Ljava/util/Comparator;",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeStatic_Unresolved1) {
@@ -876,7 +876,7 @@
   ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
                         "x",
                         "()V",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeStatic_Unresolved2) {
@@ -884,7 +884,7 @@
   ASSERT_TRUE(HasMethod("LMySSLSocket;",
                         "x",
                         "()V",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInReferenced) {
@@ -893,7 +893,7 @@
   ASSERT_TRUE(HasMethod("Ljava/net/Socket;",
                         "<init>",
                         "()V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/net/Socket;"));
 }
@@ -904,7 +904,7 @@
   ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
                         "checkOldImpl",
                         "()V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "private",
                         "Ljava/net/Socket;"));
 }
@@ -914,7 +914,7 @@
   ASSERT_TRUE(HasMethod("LMySSLSocket;",
                         "checkOldImpl",
                         "()V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "private",
                         "Ljava/net/Socket;"));
 }
@@ -925,7 +925,7 @@
   ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
                         "x",
                         "()V",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeDirect_Unresolved2) {
@@ -933,7 +933,7 @@
   ASSERT_TRUE(HasMethod("LMySSLSocket;",
                         "x",
                         "()V",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInReferenced) {
@@ -942,7 +942,7 @@
   ASSERT_TRUE(HasMethod("Ljava/lang/Throwable;",
                         "getMessage",
                         "()Ljava/lang/String;",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/lang/Throwable;"));
   // Type dependency on `this` argument.
@@ -955,7 +955,7 @@
   ASSERT_TRUE(HasMethod("Ljava/io/InterruptedIOException;",
                         "getMessage",
                         "()Ljava/lang/String;",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/lang/Throwable;"));
   // Type dependency on `this` argument.
@@ -967,7 +967,7 @@
   ASSERT_TRUE(HasMethod("LMySocketTimeoutException;",
                         "getMessage",
                         "()Ljava/lang/String;",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/lang/Throwable;"));
 }
@@ -977,7 +977,7 @@
   ASSERT_TRUE(HasMethod("LMyThreadSet;",
                         "size",
                         "()I",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/util/Set;"));
 }
@@ -988,7 +988,7 @@
   ASSERT_TRUE(HasMethod("Ljava/io/InterruptedIOException;",
                         "x",
                         "()V",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeVirtual_Unresolved2) {
@@ -996,7 +996,7 @@
   ASSERT_TRUE(HasMethod("LMySocketTimeoutException;",
                         "x",
                         "()V",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInReferenced) {
@@ -1005,7 +1005,7 @@
   ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
                         "run",
                         "()V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/lang/Runnable;"));
 }
@@ -1016,7 +1016,7 @@
   ASSERT_TRUE(HasMethod("LMyThread;",
                         "join",
                         "()V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/lang/Thread;"));
 }
@@ -1027,7 +1027,7 @@
   ASSERT_TRUE(HasMethod("LMyThreadSet;",
                         "run",
                         "()V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/lang/Thread;"));
 }
@@ -1037,7 +1037,7 @@
   ASSERT_TRUE(HasMethod("LMyThreadSet;",
                         "isEmpty",
                         "()Z",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/util/Set;"));
 }
@@ -1048,12 +1048,12 @@
   ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
                         "x",
                         "()V",
-                        /* expect_resolved */ false));
+                        /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeInterface_Unresolved2) {
   ASSERT_FALSE(VerifyMethod("InvokeInterface_Unresolved2"));
-  ASSERT_TRUE(HasMethod("LMyThreadSet;", "x", "()V", /* expect_resolved */ false));
+  ASSERT_TRUE(HasMethod("LMyThreadSet;", "x", "()V", /* expect_resolved= */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeSuper_ThisAssignable) {
@@ -1063,7 +1063,7 @@
   ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
                         "run",
                         "()V",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public",
                         "Ljava/lang/Runnable;"));
 }
@@ -1074,7 +1074,7 @@
   ASSERT_TRUE(HasAssignable("Ljava/lang/Integer;", "Ljava/lang/Thread;", false));
   ASSERT_TRUE(HasMethod("Ljava/lang/Integer;",
                         "intValue", "()I",
-                        /* expect_resolved */ true,
+                        /* expect_resolved= */ true,
                         "public", "Ljava/lang/Integer;"));
 }
 
@@ -1443,7 +1443,7 @@
         ScopedObjectAccess soa(Thread::Current());
         LoadDexFile(soa, "VerifierDeps", multi);
       }
-      VerifyWithCompilerDriver(/* verifier_deps */ nullptr);
+      VerifyWithCompilerDriver(/* verifier_deps= */ nullptr);
 
       std::vector<uint8_t> buffer;
       verifier_deps_->Encode(dex_files_, &buffer);
@@ -1493,22 +1493,22 @@
 }
 
 TEST_F(VerifierDepsTest, NotAssignable_InterfaceWithClassInBoot) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
-                                         /* src */ "LIface;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ false));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+                                         /* src= */ "LIface;",
+                                         /* is_strict= */ true,
+                                         /* is_assignable= */ false));
   ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "LIface;", false));
 }
 
 TEST_F(VerifierDepsTest, Assignable_Arrays) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[LIface;",
-                                         /* src */ "[LMyClassExtendingInterface;",
-                                         /* is_strict */ false,
-                                         /* is_assignable */ true));
+  ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "[LIface;",
+                                         /* src= */ "[LMyClassExtendingInterface;",
+                                         /* is_strict= */ false,
+                                         /* is_assignable= */ true));
   ASSERT_FALSE(HasAssignable(
-      "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable */ true));
+      "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable= */ true));
   ASSERT_FALSE(HasAssignable(
-      "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable */ false));
+      "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable= */ false));
 }
 
 }  // namespace verifier
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 872fab3..ea4158a 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -479,14 +479,13 @@
   UsageError("  --compilation-reason=<string>: optional metadata specifying the reason for");
   UsageError("      compiling the apk. If specified, the string will be embedded verbatim in");
   UsageError("      the key value store of the oat file.");
+  UsageError("      Example: --compilation-reason=install");
   UsageError("");
   UsageError("  --resolve-startup-const-strings=true|false: If true, the compiler eagerly");
   UsageError("      resolves strings referenced from const-string of startup methods.");
   UsageError("");
   UsageError("  --max-image-block-size=<size>: Maximum solid block size for compressed images.");
   UsageError("");
-  UsageError("      Example: --compilation-reason=install");
-  UsageError("");
   std::cerr << "See log for usage error information\n";
   exit(EXIT_FAILURE);
 }
@@ -601,8 +600,7 @@
         Fatal(StringPrintf("dex2oat did not finish after %" PRId64 " seconds",
                            timeout_in_milliseconds_/1000));
       } else if (rc != 0) {
-        std::string message(StringPrintf("pthread_cond_timedwait failed: %s",
-                                         strerror(errno)));
+        std::string message(StringPrintf("pthread_cond_timedwait failed: %s", strerror(rc)));
         Fatal(message.c_str());
       }
     }
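
The dex2oat change above fixes a classic pthreads error-reporting bug: pthread_cond_timedwait() returns its error code directly and leaves errno untouched, so strerror(errno) could print a stale, unrelated message. A sketch of the failure mode, with invented names:

    int rc = pthread_cond_timedwait(&cond_, &mutex_, &timeout);
    if (rc != 0 && rc != ETIMEDOUT) {
      // Wrong: pthread functions do not set errno, so strerror(errno) may
      // describe some earlier, unrelated failure.
      // Right: decode the code the call actually returned.
      Fatal(StringPrintf("pthread_cond_timedwait failed: %s", strerror(rc)));
    }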
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index d15bbda..434cb35 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -49,6 +49,9 @@
         darwin: {
             enabled: false,
         },
+        windows: {
+            enabled: true,
+        },
     },
 }
 
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index c3fb5fd..0fcd6a5 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -35,7 +35,6 @@
         "base/memory_region.cc",
         "base/mem_map.cc",
         // "base/mem_map_fuchsia.cc", put in target when fuchsia supported by soong
-        "base/mem_map_unix.cc",
         "base/os_linux.cc",
         "base/runtime_debug.cc",
         "base/safe_copy.cc",
@@ -50,20 +49,38 @@
     ],
     target: {
         android: {
+            srcs: [
+                "base/mem_map_unix.cc",
+            ],
             static_libs: [
                 // ZipArchive support, the order matters here to get all symbols.
                 "libziparchive",
                 "libz",
             ],
+            shared_libs: [
+                "liblog",
+                // For ashmem.
+                "libcutils",
+                // For common macros.
+                "libbase",
+            ],
             // Exclude the version script from Darwin host since it's not
             // supported by the linker there. That means ASan checks on Darwin
             // might trigger ODR violations.
             version_script: "libartbase.map",
         },
-        host: {
+        not_windows: {
+            srcs: [
+                "base/mem_map_unix.cc",
+            ],
             shared_libs: [
                 "libziparchive",
                 "libz",
+                "liblog",
+                // For ashmem.
+                "libcutils",
+                // For common macros.
+                "libbase",
             ],
         },
         linux_glibc: {
@@ -71,17 +88,20 @@
         },
         windows: {
             version_script: "libartbase.map",
+            static_libs: [
+                "libziparchive",
+                "libz",
+                "liblog",
+                // For ashmem.
+                "libcutils",
+                // For common macros.
+                "libbase",
+            ],
+            cflags: ["-Wno-thread-safety"],
         },
     },
     generated_sources: ["art_libartbase_operator_srcs"],
     cflags: ["-DBUILDING_LIBART=1"],
-    shared_libs: [
-        "liblog",
-        // For ashmem.
-        "libcutils",
-        // For common macros.
-        "libbase",
-    ],
 
     // Utilities used by various ART libs and tools are linked in statically
     // here to avoid shared lib dependencies outside the ART APEX. No target
@@ -147,6 +167,14 @@
         "libziparchive",
     ],
     export_shared_lib_headers: ["libbase"],
+    target: {
+        windows: {
+            enabled: true,
+            shared: {
+                enabled: false,
+            },
+        },
+    },
 }
 
 art_cc_library {
@@ -160,6 +188,14 @@
         "libziparchive",
     ],
     export_shared_lib_headers: ["libbase"],
+    target: {
+        windows: {
+            enabled: true,
+            shared: {
+                enabled: false,
+            },
+        },
+    },
 }
 
 art_cc_library {
diff --git a/libartbase/base/arena_allocator.cc b/libartbase/base/arena_allocator.cc
index df3deba..0e7f6cc 100644
--- a/libartbase/base/arena_allocator.cc
+++ b/libartbase/base/arena_allocator.cc
@@ -16,7 +16,6 @@
 
 #include "arena_allocator-inl.h"
 
-#include <sys/mman.h>
 
 #include <algorithm>
 #include <cstddef>
@@ -25,6 +24,8 @@
 
 #include <android-base/logging.h>
 
+#include "mman.h"
+
 namespace art {
 
 constexpr size_t kMemoryToolRedZoneBytes = 8;
diff --git a/libartbase/base/file_utils.cc b/libartbase/base/file_utils.cc
index f8d6016..9490798 100644
--- a/libartbase/base/file_utils.cc
+++ b/libartbase/base/file_utils.cc
@@ -19,11 +19,13 @@
 #include <inttypes.h>
 #include <sys/stat.h>
 #include <sys/types.h>
+#ifndef _WIN32
 #include <sys/wait.h>
+#endif
 #include <unistd.h>
 
 // We need dladdr.
-#ifndef __APPLE__
+#if !defined(__APPLE__) && !defined(_WIN32)
 #ifndef _GNU_SOURCE
 #define _GNU_SOURCE
 #define DEFINED_GNU_SOURCE
@@ -84,6 +86,10 @@
 }
 
 std::string GetAndroidRootSafe(std::string* error_msg) {
+#ifdef _WIN32
+  *error_msg = "GetAndroidRootSafe unsupported for Windows.";
+  return "";
+#else
   // Prefer ANDROID_ROOT if it's set.
   const char* android_dir = getenv("ANDROID_ROOT");
   if (android_dir != nullptr) {
@@ -118,6 +124,7 @@
     return "";
   }
   return "/system";
+#endif
 }
 
 std::string GetAndroidRoot() {
@@ -179,6 +186,15 @@
 
 void GetDalvikCache(const char* subdir, const bool create_if_absent, std::string* dalvik_cache,
                     bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache) {
+#ifdef _WIN32
+  UNUSED(subdir);
+  UNUSED(create_if_absent);
+  UNUSED(dalvik_cache);
+  UNUSED(have_android_data);
+  UNUSED(dalvik_cache_exists);
+  UNUSED(is_global_cache);
+  LOG(FATAL) << "GetDalvikCache unsupported on Windows.";
+#else
   CHECK(subdir != nullptr);
   std::string error_msg;
   const char* android_data = GetAndroidDataSafe(&error_msg);
@@ -199,6 +215,7 @@
     *dalvik_cache_exists = ((mkdir(dalvik_cache_root.c_str(), 0700) == 0 || errno == EEXIST) &&
                             (mkdir(dalvik_cache->c_str(), 0700) == 0 || errno == EEXIST));
   }
+#endif
 }
 
 std::string GetDalvikCache(const char* subdir) {
@@ -262,9 +279,15 @@
 }
 
 bool LocationIsOnSystem(const char* path) {
+#ifdef _WIN32
+  UNUSED(path);
+  LOG(FATAL) << "LocationIsOnSystem is unsupported on Windows.";
+  return false;
+#else
   UniqueCPtr<const char[]> full_path(realpath(path, nullptr));
   return full_path != nullptr &&
       android::base::StartsWith(full_path.get(), GetAndroidRoot().c_str());
+#endif
 }
 
 bool LocationIsOnSystemFramework(const char* full_path) {
diff --git a/libartbase/base/malloc_arena_pool.cc b/libartbase/base/malloc_arena_pool.cc
index 02e29f1..4de34b5 100644
--- a/libartbase/base/malloc_arena_pool.cc
+++ b/libartbase/base/malloc_arena_pool.cc
@@ -16,7 +16,6 @@
 
 #include "malloc_arena_pool.h"
 
-#include <sys/mman.h>
 
 #include <algorithm>
 #include <cstddef>
@@ -25,6 +24,7 @@
 
 #include <android-base/logging.h>
 #include "arena_allocator-inl.h"
+#include "mman.h"
 
 namespace art {
 
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 532ca28..2833750 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -18,8 +18,7 @@
 
 #include <inttypes.h>
 #include <stdlib.h>
-#include <sys/mman.h>  // For the PROT_* and MAP_* constants.
-#if !defined(ANDROID_OS) && !defined(__Fuchsia__)
+#if !defined(ANDROID_OS) && !defined(__Fuchsia__) && !defined(_WIN32)
 #include <sys/resource.h>
 #endif
 
@@ -39,6 +38,7 @@
 #include "globals.h"
 #include "logging.h"  // For VLOG_IS_ON.
 #include "memory_tool.h"
+#include "mman.h"  // For the PROT_* and MAP_* constants.
 #include "utils.h"
 
 #ifndef MAP_ANONYMOUS
@@ -811,19 +811,30 @@
     if (!kMadviseZeroes) {
       memset(base_begin_, 0, base_size_);
     }
+#ifdef _WIN32
+    // It is benign not to madvise away the pages here.
+    PLOG(WARNING) << "MemMap::MadviseDontNeedAndZero does not madvise on Windows.";
+#else
     int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
     if (result == -1) {
       PLOG(WARNING) << "madvise failed";
     }
+#endif
   }
 }
 
 bool MemMap::Sync() {
+#ifdef _WIN32
+  // TODO: add FlushViewOfFile support.
+  PLOG(ERROR) << "MemMap::Sync unsupported on Windows.";
+  return false;
+#else
   // Historical note: To avoid Valgrind errors, we temporarily lifted the lower-end noaccess
   // protection before passing it to msync() when `redzone_size_` was non-null, as Valgrind
   // only accepts page-aligned base address, and excludes the higher-end noaccess protection
   // from the msync range. b/27552451.
   return msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
+#endif
 }
 
 bool MemMap::Protect(int prot) {
@@ -832,10 +843,12 @@
     return true;
   }
 
+#ifndef _WIN32
   if (mprotect(base_begin_, base_size_, prot) == 0) {
     prot_ = prot;
     return true;
   }
+#endif
 
   PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
               << prot << ") failed";
@@ -1206,7 +1219,11 @@
     DCHECK_LE(page_begin, page_end);
     DCHECK_LE(page_end, mem_end);
     std::fill(mem_begin, page_begin, 0);
+#ifdef _WIN32
+    LOG(WARNING) << "ZeroAndReleasePages does not madvise on Windows.";
+#else
     CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed";
+#endif
     std::fill(page_end, mem_end, 0);
   }
 }
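
With these mem_map.cc guards, Windows builds compile out msync(), madvise() and mprotect() rather than failing: Sync() simply reports failure until the FlushViewOfFile TODO is implemented, and the madvise call sites log a warning instead. A hedged sketch of what that Sync() TODO could look like, assuming the Win32 FlushViewOfFile API (an assumption, not part of this change):

    #ifdef _WIN32
    #include <windows.h>

    // Rough msync(MS_SYNC) analogue: writes the dirty pages of a mapped view
    // back to its backing file. Passing 0 as the size flushes from base to the
    // end of the mapping. Full durability would additionally need
    // FlushFileBuffers() on the file handle; error handling elided here.
    bool SyncMappedView(void* base, size_t size) {
      return FlushViewOfFile(base, size) != 0;
    }
    #endif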
diff --git a/libartbase/base/mem_map_fuchsia.cc b/libartbase/base/mem_map_fuchsia.cc
index d1c92ce..6b0e06c 100644
--- a/libartbase/base/mem_map_fuchsia.cc
+++ b/libartbase/base/mem_map_fuchsia.cc
@@ -15,8 +15,8 @@
  */
 
 #include "mem_map.h"
-#include <sys/mman.h>
 #include "logging.h"
+#include "mman.h"
 
 #include <zircon/process.h>
 #include <zircon/syscalls.h>
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index 074d4c2..bf39fd1 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -16,8 +16,6 @@
 
 #include "mem_map.h"
 
-#include <sys/mman.h>
-
 #include <memory>
 #include <random>
 
@@ -25,6 +23,7 @@
 #include "common_runtime_test.h"  // For TEST_DISABLED_FOR_MIPS
 #include "logging.h"
 #include "memory_tool.h"
+#include "mman.h"
 #include "unix_file/fd_file.h"
 
 namespace art {
diff --git a/libartbase/base/mem_map_unix.cc b/libartbase/base/mem_map_unix.cc
index 601b049..ac854df 100644
--- a/libartbase/base/mem_map_unix.cc
+++ b/libartbase/base/mem_map_unix.cc
@@ -16,7 +16,7 @@
 
 #include "mem_map.h"
 
-#include <sys/mman.h>
+#include "mman.h"
 
 namespace art {
 
diff --git a/libartbase/base/membarrier.cc b/libartbase/base/membarrier.cc
index 4c86b6b..abb36bc 100644
--- a/libartbase/base/membarrier.cc
+++ b/libartbase/base/membarrier.cc
@@ -18,8 +18,10 @@
 
 #include <errno.h>
 
+#if !defined(_WIN32)
 #include <sys/syscall.h>
 #include <unistd.h>
+#endif
 #include "macros.h"
 
 #if defined(__BIONIC__)
diff --git a/libartbase/base/memfd.cc b/libartbase/base/memfd.cc
index 7c20401..780be32 100644
--- a/libartbase/base/memfd.cc
+++ b/libartbase/base/memfd.cc
@@ -18,9 +18,11 @@
 
 #include <errno.h>
 #include <stdio.h>
+#if !defined(_WIN32)
 #include <sys/syscall.h>
 #include <sys/utsname.h>
 #include <unistd.h>
+#endif
 
 #include "macros.h"
 
diff --git a/libartbase/base/mman.h b/libartbase/base/mman.h
new file mode 100644
index 0000000..bd63f65
--- /dev/null
+++ b/libartbase/base/mman.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_MMAN_H_
+#define ART_LIBARTBASE_BASE_MMAN_H_
+
+#ifdef _WIN32
+
+// There is no sys/mman.h in mingw.
+// As these are just placeholders for the APIs, all values are stubbed out.
+
+#define PROT_READ      0        // 0x1
+#define PROT_WRITE     0        // 0x2
+#define PROT_EXEC      0        // 0x4
+#define PROT_NONE      0        // 0x0
+
+#define MAP_SHARED     0        // 0x01
+#define MAP_PRIVATE    0        // 0x02
+
+#define MAP_FAILED     nullptr  // ((void*) -1)
+#define MAP_FIXED      0        // 0x10
+#define MAP_ANONYMOUS  0        // 0x20
+
+#else
+
+#include <sys/mman.h>
+
+#endif
+
+
+#endif  // ART_LIBARTBASE_BASE_MMAN_H_
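
The new mman.h keeps portable code compiling unconditionally: POSIX targets get the real <sys/mman.h>, while on Windows the PROT_/MAP_ names collapse to 0 (the genuine POSIX values are preserved in the trailing comments), so flag arithmetic still builds even though the mmap-style calls themselves only exist behind POSIX guards. Illustrative use, not from this change:

    #include "base/mman.h"

    // Builds on every target. On Windows both expressions evaluate to 0, and
    // the code that would consume them stays behind #ifndef _WIN32 elsewhere.
    constexpr int kAnonRwProt = PROT_READ | PROT_WRITE;
    constexpr int kAnonFlags = MAP_PRIVATE | MAP_ANONYMOUS;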
diff --git a/libartbase/base/os_linux.cc b/libartbase/base/os_linux.cc
index f8b31cf..a00779e 100644
--- a/libartbase/base/os_linux.cc
+++ b/libartbase/base/os_linux.cc
@@ -50,7 +50,12 @@
 }
 
 File* OS::CreateEmptyFileWriteOnly(const char* name) {
-  return art::CreateEmptyFile(name, O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC);
+#ifdef _WIN32
+  int flags = O_WRONLY | O_TRUNC;
+#else
+  int flags = O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC;
+#endif
+  return art::CreateEmptyFile(name, flags);
 }
 
 File* OS::OpenFileWithFlags(const char* name, int flags, bool auto_flush) {
diff --git a/libartbase/base/safe_copy.cc b/libartbase/base/safe_copy.cc
index b46b921..ad75aa7 100644
--- a/libartbase/base/safe_copy.cc
+++ b/libartbase/base/safe_copy.cc
@@ -16,8 +16,10 @@
 
 #include "safe_copy.h"
 
+#ifdef __linux__
 #include <sys/uio.h>
 #include <sys/user.h>
+#endif
 #include <unistd.h>
 
 #include <algorithm>
diff --git a/libartbase/base/safe_copy_test.cc b/libartbase/base/safe_copy_test.cc
index c23651f..9f7d409 100644
--- a/libartbase/base/safe_copy_test.cc
+++ b/libartbase/base/safe_copy_test.cc
@@ -18,12 +18,12 @@
 
 #include <errno.h>
 #include <string.h>
-#include <sys/mman.h>
 #include <sys/user.h>
 
 #include "android-base/logging.h"
 #include "globals.h"
 #include "gtest/gtest.h"
+#include "mman.h"
 
 
 namespace art {
diff --git a/libartbase/base/scoped_flock.cc b/libartbase/base/scoped_flock.cc
index 2f16fb2..b16a45a 100644
--- a/libartbase/base/scoped_flock.cc
+++ b/libartbase/base/scoped_flock.cc
@@ -35,6 +35,14 @@
 
 /* static */ ScopedFlock LockedFile::Open(const char* filename, int flags, bool block,
                                           std::string* error_msg) {
+#ifdef _WIN32
+  // TODO: implement file locking for Windows.
+  UNUSED(filename);
+  UNUSED(flags);
+  UNUSED(block);
+  *error_msg = "flock is unsupported on Windows";
+  return nullptr;
+#else
   while (true) {
     // NOTE: We don't check usage here because the ScopedFlock should *never* be
     // responsible for flushing its underlying FD. Its only purpose should be
@@ -89,10 +97,19 @@
 
     return ScopedFlock(new LockedFile(std::move((*file.get()))));
   }
+#endif
 }
 
 ScopedFlock LockedFile::DupOf(const int fd, const std::string& path,
                               const bool read_only_mode, std::string* error_msg) {
+#ifdef _WIN32
+  // TODO: implement file locking for Windows.
+  UNUSED(fd);
+  UNUSED(path);
+  UNUSED(read_only_mode);
+  *error_msg = "flock is unsupported on Windows.";
+  return nullptr;
+#else
   // NOTE: We don't check usage here because the ScopedFlock should *never* be
   // responsible for flushing its underlying FD. Its only purpose should be
   // to acquire a lock, and the unlock / close in the corresponding
@@ -112,9 +129,11 @@
   }
 
   return locked_file;
+#endif
 }
 
 void LockedFile::ReleaseLock() {
+#ifndef _WIN32
   if (this->Fd() != -1) {
     int flock_result = TEMP_FAILURE_RETRY(flock(this->Fd(), LOCK_UN));
     if (flock_result != 0) {
@@ -126,6 +145,7 @@
       PLOG(WARNING) << "Unable to unlock file " << this->GetPath();
     }
   }
+#endif
 }
 
 }  // namespace art
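
On Windows both LockedFile::Open and LockedFile::DupOf now return a null ScopedFlock with *error_msg set, so callers get a soft failure rather than a crash. A minimal caller-side sketch (hypothetical path; assumes ScopedFlock is the unique_ptr-like type used above):

    std::string error_msg;
    ScopedFlock lock = LockedFile::Open("/tmp/example.profile", O_RDWR, /*block=*/ true, &error_msg);
    if (lock == nullptr) {
      // Always taken on Windows until file locking is implemented there.
      LOG(WARNING) << "Proceeding without a file lock: " << error_msg;
    }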
diff --git a/libartbase/base/socket_peer_is_trusted.cc b/libartbase/base/socket_peer_is_trusted.cc
index 440054e..3996d90 100644
--- a/libartbase/base/socket_peer_is_trusted.cc
+++ b/libartbase/base/socket_peer_is_trusted.cc
@@ -16,8 +16,10 @@
 
 #include "socket_peer_is_trusted.h"
 
+#if !defined(_WIN32)
 #include <pwd.h>
 #include <sys/socket.h>
+#endif
 
 #include <android-base/logging.h>
 
diff --git a/libartbase/base/time_utils.cc b/libartbase/base/time_utils.cc
index cb30246..aa6c987 100644
--- a/libartbase/base/time_utils.cc
+++ b/libartbase/base/time_utils.cc
@@ -14,12 +14,14 @@
  * limitations under the License.
  */
 
+#include "time_utils.h"
+
 #include <inttypes.h>
+#include <stdio.h>
+
 #include <limits>
 #include <sstream>
 
-#include "time_utils.h"
-
 #include "android-base/stringprintf.h"
 
 #include "logging.h"
@@ -30,6 +32,20 @@
 
 namespace art {
 
+namespace {
+
+#if !defined(__linux__)
+int GetTimeOfDay(struct timeval* tv, struct timezone* tz) {
+#ifdef _WIN32
+  return mingw_gettimeofday(tv, tz);
+#else
+  return gettimeofday(tv, tz);
+#endif
+}
+#endif
+
+}  // namespace
+
 using android::base::StringPrintf;
 
 std::string PrettyDuration(uint64_t nano_duration, size_t max_fraction_digits) {
@@ -117,7 +133,12 @@
 std::string GetIsoDate() {
   time_t now = time(nullptr);
   tm tmbuf;
+#ifdef _WIN32
+  localtime_s(&tmbuf, &now);
+  tm* ptm = &tmbuf;
+#else
   tm* ptm = localtime_r(&now, &tmbuf);
+#endif
   return StringPrintf("%04d-%02d-%02d %02d:%02d:%02d",
       ptm->tm_year + 1900, ptm->tm_mon+1, ptm->tm_mday,
       ptm->tm_hour, ptm->tm_min, ptm->tm_sec);
@@ -130,7 +151,7 @@
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_nsec / UINT64_C(1000000);
 #else
   timeval now;
-  gettimeofday(&now, nullptr);
+  GetTimeOfDay(&now, nullptr);
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_usec / UINT64_C(1000);
 #endif
 }
@@ -142,7 +163,7 @@
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
 #else
   timeval now;
-  gettimeofday(&now, nullptr);
+  GetTimeOfDay(&now, nullptr);
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_usec;
 #endif
 }
@@ -154,7 +175,7 @@
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec;
 #else
   timeval now;
-  gettimeofday(&now, nullptr);
+  GetTimeOfDay(&now, nullptr);
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_usec * UINT64_C(1000);
 #endif
 }
@@ -195,12 +216,12 @@
 
 void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts) {
   if (absolute) {
-#if !defined(__APPLE__)
+#if defined(__linux__)
     clock_gettime(clock, ts);
 #else
     UNUSED(clock);
     timeval tv;
-    gettimeofday(&tv, nullptr);
+    GetTimeOfDay(&tv, nullptr);
     ts->tv_sec = tv.tv_sec;
     ts->tv_nsec = tv.tv_usec * 1000;
 #endif
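
A consequence of the GetTimeOfDay fallback worth noting: on non-Linux hosts MilliTime/MicroTime/NanoTime are all derived from a microsecond-resolution timeval, so NanoTime has only microsecond granularity there (mingw_gettimeofday being MinGW's gettimeofday shim). The conversion, as a standalone sketch:

    timeval now;
    GetTimeOfDay(&now, nullptr);
    // tv_usec is in microseconds; scaling by 1000 yields nanoseconds with
    // microsecond granularity -- the best this fallback can provide.
    uint64_t ns = static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) +
        now.tv_usec * UINT64_C(1000);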
diff --git a/libartbase/base/time_utils.h b/libartbase/base/time_utils.h
index 431d3e1..15805f3 100644
--- a/libartbase/base/time_utils.h
+++ b/libartbase/base/time_utils.h
@@ -18,6 +18,7 @@
 #define ART_LIBARTBASE_BASE_TIME_UTILS_H_
 
 #include <stdint.h>
+#include <stdio.h>  // Needed for correct _WIN32 build.
 #include <time.h>
 
 #include <string>
diff --git a/libartbase/base/unix_file/fd_file.cc b/libartbase/base/unix_file/fd_file.cc
index 76894c6..8831b9c 100644
--- a/libartbase/base/unix_file/fd_file.cc
+++ b/libartbase/base/unix_file/fd_file.cc
@@ -25,8 +25,13 @@
 #include <android/fdsan.h>
 #endif
 
+#if defined(_WIN32)
+#include <windows.h>
+#endif
+
 #include <limits>
 
+#include <android-base/file.h>
 #include <android-base/logging.h>
 
 // Includes needed for FdFile::Copy().
@@ -40,6 +45,96 @@
 
 namespace unix_file {
 
+#if defined(_WIN32)
+// RAII wrapper for an event object to allow asynchronous I/O to correctly signal completion.
+class ScopedEvent {
+ public:
+  ScopedEvent() {
+    handle_ = CreateEventA(/*lpEventAttributes*/ nullptr,
+                           /*bManualReset*/ true,
+                           /*bInitialState*/ false,
+                           /*lpName*/ nullptr);
+  }
+
+  ~ScopedEvent() { CloseHandle(handle_); }
+
+  HANDLE handle() { return handle_; }
+
+ private:
+  HANDLE handle_;
+  DISALLOW_COPY_AND_ASSIGN(ScopedEvent);
+};
+
+// Windows implementation of pread/pwrite. Note that these DO move the file descriptor's read/write
+// position, but do so atomically.
+static ssize_t pread(int fd, void* data, size_t byte_count, off64_t offset) {
+  ScopedEvent event;
+  if (event.handle() == nullptr) {  // CreateEventA returns null on failure.
+    PLOG(ERROR) << "Could not create event handle.";
+    errno = EIO;
+    return static_cast<ssize_t>(-1);
+  }
+
+  auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
+  DWORD bytes_read = 0;
+  OVERLAPPED overlapped = {};
+  overlapped.Offset = static_cast<DWORD>(offset);
+  overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);
+  overlapped.hEvent = event.handle();
+  if (!ReadFile(handle, data, static_cast<DWORD>(byte_count), &bytes_read, &overlapped)) {
+    // If the read failed with an error other than ERROR_IO_PENDING, return an error.
+    // ERROR_IO_PENDING signals the read was begun asynchronously.
+    // Block until the asynchronous operation has finished or fails, and return
+    // result accordingly.
+    if (::GetLastError() != ERROR_IO_PENDING ||
+        !::GetOverlappedResult(handle, &overlapped, &bytes_read, TRUE)) {
+      // In case someone tries to read errno (since this is masquerading as a POSIX call).
+      errno = EIO;
+      return static_cast<ssize_t>(-1);
+    }
+  }
+  return static_cast<ssize_t>(bytes_read);
+}
+
+static ssize_t pwrite(int fd, const void* buf, size_t count, off64_t offset) {
+  ScopedEvent event;
+  if (event.handle() == nullptr) {  // CreateEventA returns null on failure.
+    PLOG(ERROR) << "Could not create event handle.";
+    errno = EIO;
+    return static_cast<ssize_t>(-1);
+  }
+
+  auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
+  DWORD bytes_written = 0;
+  OVERLAPPED overlapped = {};
+  overlapped.Offset = static_cast<DWORD>(offset);
+  overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);
+  overlapped.hEvent = event.handle();
+  if (!::WriteFile(handle, buf, static_cast<DWORD>(count), &bytes_written, &overlapped)) {
+    // If the write failed with other than ERROR_IO_PENDING, return an error.
+    // ERROR_IO_PENDING signals the write was begun asynchronously.
+    // Block until the asynchronous operation has finished or fails, and return
+    // result accordingly.
+    if (::GetLastError() != ERROR_IO_PENDING ||
+        !::GetOverlappedResult(handle, &overlapped, &bytes_written, TRUE)) {
+      // In case someone tries to read errno (since this is masquerading as a POSIX call).
+      errno = EIO;
+      return static_cast<ssize_t>(-1);
+    }
+  }
+  return static_cast<ssize_t>(bytes_written);
+}
+
+static int fsync(int fd) {
+  auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
+  if (handle != INVALID_HANDLE_VALUE && ::FlushFileBuffers(handle)) {
+    return 0;
+  }
+  errno = EINVAL;
+  return -1;
+}
+#endif
+
 #if defined(__BIONIC__)
 static uint64_t GetFdFileOwnerTag(FdFile* fd_file) {
   return android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_ART_FDFILE,
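
The OVERLAPPED structure is what carries the file offset in these wrappers: the 64-bit offset is split into the Offset/OffsetHigh 32-bit halves, and the event handle gives GetOverlappedResult something to wait on when the kernel completes the I/O asynchronously. The packing, in isolation:

    OVERLAPPED overlapped = {};
    off64_t offset = INT64_C(0x100000010);                     // example value
    overlapped.Offset = static_cast<DWORD>(offset);            // low 32 bits:  0x00000010
    overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);  // high 32 bits: 0x00000001
    // The kernel reconstructs (uint64_t{OffsetHigh} << 32) | Offset.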
diff --git a/libartbase/base/utils.cc b/libartbase/base/utils.cc
index 0f172fd..58d8575 100644
--- a/libartbase/base/utils.cc
+++ b/libartbase/base/utils.cc
@@ -19,9 +19,7 @@
 #include <inttypes.h>
 #include <pthread.h>
 #include <sys/stat.h>
-#include <sys/syscall.h>
 #include <sys/types.h>
-#include <sys/wait.h>
 #include <unistd.h>
 
 #include <fstream>
@@ -47,6 +45,16 @@
 
 #if defined(__linux__)
 #include <linux/unistd.h>
+#include <sys/syscall.h>
+#endif
+
+#if defined(_WIN32)
+#include <windows.h>
+// This include needs to be here due to our coding conventions.  Unfortunately
+// it drags in the definition of the dreaded ERROR macro.
+#ifdef ERROR
+#undef ERROR
+#endif
 #endif
 
 namespace art {
@@ -61,6 +69,8 @@
   return owner;
 #elif defined(__BIONIC__)
   return gettid();
+#elif defined(_WIN32)
+  return static_cast<pid_t>(::GetCurrentThreadId());
 #else
   return syscall(__NR_gettid);
 #endif
@@ -68,12 +78,17 @@
 
 std::string GetThreadName(pid_t tid) {
   std::string result;
+#ifdef _WIN32
+  UNUSED(tid);
+  result = "<unknown>";
+#else
   // TODO: make this less Linux-specific.
   if (ReadFileToString(StringPrintf("/proc/self/task/%d/comm", tid), &result)) {
     result.resize(result.size() - 1);  // Lose the trailing '\n'.
   } else {
     result = "<unknown>";
   }
+#endif
   return result;
 }
 
@@ -137,7 +152,7 @@
   } else {
     s = thread_name + len - 15;
   }
-#if defined(__linux__)
+#if defined(__linux__) || defined(_WIN32)
   // pthread_setname_np fails rather than truncating long strings.
   char buf[16];       // MAX_TASK_COMM_LEN=16 is hard-coded in the kernel.
   strncpy(buf, s, sizeof(buf)-1);
@@ -153,6 +168,11 @@
 
 void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu) {
   *utime = *stime = *task_cpu = 0;
+#ifdef _WIN32
+  // TODO: implement this.
+  UNUSED(tid);
+  *state = 'S';
+#else
   std::string stats;
   // TODO: make this less Linux-specific.
   if (!ReadFileToString(StringPrintf("/proc/self/task/%d/stat", tid), &stats)) {
@@ -167,6 +187,7 @@
   *utime = strtoull(fields[11].c_str(), nullptr, 10);
   *stime = strtoull(fields[12].c_str(), nullptr, 10);
   *task_cpu = strtoull(fields[36].c_str(), nullptr, 10);
+#endif
 }
 
 static void ParseStringAfterChar(const std::string& s,
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index a7f4b28..5056edc 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -18,7 +18,6 @@
 
 #include <fcntl.h>
 #include <stdio.h>
-#include <sys/mman.h>  // For the PROT_* and MAP_* constants.
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
@@ -27,6 +26,7 @@
 #include "android-base/stringprintf.h"
 #include "ziparchive/zip_archive.h"
 
+#include "base/mman.h"
 #include "bit_utils.h"
 #include "unix_file/fd_file.h"
 
@@ -203,6 +203,11 @@
 }
 
 static void SetCloseOnExec(int fd) {
+#ifdef _WIN32
+  // Exec is not supported on Windows.
+  UNUSED(fd);
+  PLOG(ERROR) << "SetCloseOnExec is not supported on Windows.";
+#else
   // This dance is more portable than Linux's O_CLOEXEC open(2) flag.
   int flags = fcntl(fd, F_GETFD);
   if (flags == -1) {
@@ -214,6 +219,7 @@
     PLOG(WARNING) << "fcntl(" << fd << ", F_SETFD, " << flags << ") failed";
     return;
   }
+#endif
 }
 
 ZipArchive* ZipArchive::Open(const char* filename, std::string* error_msg) {
diff --git a/libdexfile/Android.bp b/libdexfile/Android.bp
index 4d6aa5c..a4f7e25 100644
--- a/libdexfile/Android.bp
+++ b/libdexfile/Android.bp
@@ -43,30 +43,58 @@
                 "libziparchive",
                 "libz",
             ],
+            shared_libs: [
+                // For MemMap.
+                "libartbase",
+                "liblog",
+                // For atrace.
+                "libcutils",
+                // For common macros.
+                "libbase",
+            ],
+            export_shared_lib_headers: [
+                "libartbase",
+                "libbase",
+            ],
         },
-        host: {
+        not_windows: {
             shared_libs: [
                 "libziparchive",
                 "libz",
+                // For MemMap.
+                "libartbase",
+                "liblog",
+                // For atrace.
+                "libcutils",
+                // For common macros.
+                "libbase",
             ],
+            export_shared_lib_headers: [
+                "libartbase",
+                "libbase",
+            ],
+        },
+        windows: {
+            static_libs: [
+                "libziparchive",
+                "libz",
+                // For MemMap.
+                "libartbase",
+                "liblog",
+                // For atrace.
+                "libcutils",
+                // For common macros.
+                "libbase",
+            ],
+            export_static_lib_headers: [
+                "libartbase",
+                "libbase",
+            ],
+            cflags: ["-Wno-thread-safety"],
         },
     },
     generated_sources: ["dexfile_operator_srcs"],
-    shared_libs: [
-        // For MemMap.
-        "libartbase",
-        "liblog",
-        // For atrace.
-        "libcutils",
-        // For common macros.
-        "libbase",
-        "libz",
-    ],
     export_include_dirs: ["."],
-    export_shared_lib_headers: [
-        "libartbase",
-        "libbase",
-    ],
 }
 
 cc_defaults {
@@ -121,6 +149,14 @@
     strip: {
         keep_symbols: true,
     },
+    target: {
+        windows: {
+            enabled: true,
+            shared: {
+                enabled: false,
+            },
+        },
+    },
 }
 
 art_cc_library {
@@ -129,6 +165,14 @@
         "art_debug_defaults",
         "libdexfile_defaults",
     ],
+    target: {
+        windows: {
+            enabled: true,
+            shared: {
+                enabled: false,
+            },
+        },
+    },
 }
 
 cc_library_headers {
diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
index ae1322d..57e838f 100644
--- a/libdexfile/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -16,7 +16,6 @@
 
 #include "art_dex_file_loader.h"
 
-#include <sys/mman.h>  // For the PROT_* and MAP_* constants.
 #include <sys/stat.h>
 
 #include "android-base/stringprintf.h"
@@ -24,6 +23,7 @@
 #include "base/file_magic.h"
 #include "base/file_utils.h"
 #include "base/mem_map.h"
+#include "base/mman.h"  // For the PROT_* and MAP_* constants.
 #include "base/stl_util.h"
 #include "base/systrace.h"
 #include "base/unix_file/fd_file.h"
diff --git a/libdexfile/dex/dex_file_layout.cc b/libdexfile/dex/dex_file_layout.cc
index 75a3111..929025a 100644
--- a/libdexfile/dex/dex_file_layout.cc
+++ b/libdexfile/dex/dex_file_layout.cc
@@ -16,9 +16,9 @@
 
 #include "dex_file_layout.h"
 
-#include <sys/mman.h>
 
 #include "base/bit_utils.h"
+#include "base/mman.h"
 #include "dex_file.h"
 
 namespace art {
@@ -26,6 +26,12 @@
 int DexLayoutSection::MadviseLargestPageAlignedRegion(const uint8_t* begin,
                                                       const uint8_t* end,
                                                       int advice) {
+#ifdef _WIN32
+  UNUSED(begin);
+  UNUSED(end);
+  UNUSED(advice);
+  PLOG(WARNING) << "madvise is unsupported on Windows.";
+#else
   DCHECK_LE(begin, end);
   begin = AlignUp(begin, kPageSize);
   end = AlignDown(end, kPageSize);
@@ -37,6 +43,7 @@
     }
     return result;
   }
+#endif
   return 0;
 }
 
@@ -50,6 +57,11 @@
 }
 
 void DexLayoutSections::Madvise(const DexFile* dex_file, MadviseState state) const {
+#ifdef _WIN32
+  UNUSED(dex_file);
+  UNUSED(state);
+  PLOG(WARNING) << "madvise is unsupported on Windows.";
+#else
   // The dex file is already defaulted to random access everywhere.
   for (const DexLayoutSection& section : sections_) {
     switch (state) {
@@ -79,6 +91,7 @@
       }
     }
   }
+#endif
 }
 
 std::ostream& operator<<(std::ostream& os, const DexLayoutSection& section) {
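
Since madvise only accepts page-aligned addresses, the region is shrunk inward to whole pages: begin rounds up, end rounds down. A worked example, assuming the common 4 KiB kPageSize:

    //   begin = 0x1100  -> AlignUp(begin, 0x1000)  -> 0x2000
    //   end   = 0x7f00  -> AlignDown(end, 0x1000)  -> 0x7000
    //   madvise() is issued for [0x2000, 0x7000), i.e. 5 whole pages.
    //   A section smaller than one page aligns to begin >= end and is skipped.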
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index 1884bcf..a719d41 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -187,12 +187,18 @@
   std::string base_location = GetBaseLocation(dex_location);
   const char* suffix = dex_location + base_location.size();
   DCHECK(suffix[0] == 0 || suffix[0] == kMultiDexSeparator);
+#ifdef _WIN32
+  // Warning: No symbolic link processing here.
+  PLOG(WARNING) << "realpath is unsupported on Windows.";
+#else
   // Warning: Bionic implementation of realpath() allocates > 12KB on the stack.
   // Do not run this code on a small stack, e.g. in signal handler.
   UniqueCPtr<const char[]> path(realpath(base_location.c_str(), nullptr));
   if (path != nullptr && path.get() != base_location) {
     return std::string(path.get()) + suffix;
-  } else if (suffix[0] == 0) {
+  }
+#endif
+  if (suffix[0] == 0) {
     return base_location;
   } else {
     return dex_location;
diff --git a/libdexfile/external/include/art_api/ext_dex_file.h b/libdexfile/external/include/art_api/ext_dex_file.h
index 5f64ab1..4a52a2b 100644
--- a/libdexfile/external/include/art_api/ext_dex_file.h
+++ b/libdexfile/external/include/art_api/ext_dex_file.h
@@ -98,11 +98,11 @@
 // Minimal std::string look-alike for a string returned from libdexfile.
 class DexString final {
  public:
-  DexString(DexString&& dex_str) { ReplaceExtString(std::move(dex_str)); }
+  DexString(DexString&& dex_str) noexcept { ReplaceExtString(std::move(dex_str)); }
   explicit DexString(const char* str = "") : ext_string_(ExtDexFileMakeString(str)) {}
   ~DexString() { ExtDexFileFreeString(ext_string_); }
 
-  DexString& operator=(DexString&& dex_str) {
+  DexString& operator=(DexString&& dex_str) noexcept {
     ReplaceExtString(std::move(dex_str));
     return *this;
   }
@@ -163,7 +163,7 @@
 // thread-safe.
 class DexFile {
  public:
-  DexFile(DexFile&& dex_file) {
+  DexFile(DexFile&& dex_file) noexcept {
     ext_dex_file_ = dex_file.ext_dex_file_;
     dex_file.ext_dex_file_ = nullptr;
   }
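
Declaring the move operations noexcept is not cosmetic: std::vector and friends only move elements during reallocation when the move constructor is known not to throw, falling back to copies otherwise. A one-line compile-time check one could add in a test (a sketch, assuming the art_api::dex namespace used by this header):

    #include <type_traits>
    static_assert(std::is_nothrow_move_constructible<art_api::dex::DexString>::value,
                  "DexString must be nothrow-movable so containers can relocate it cheaply");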
diff --git a/libprofile/profile/profile_compilation_info.cc b/libprofile/profile/profile_compilation_info.cc
index 02f6344..9b32b9e 100644
--- a/libprofile/profile/profile_compilation_info.cc
+++ b/libprofile/profile/profile_compilation_info.cc
@@ -58,6 +58,12 @@
 // profile_compilation_info object. All the profile line headers are now placed together
 // before corresponding method_encodings and class_ids.
 const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '1', '0', '\0' };
+const uint8_t ProfileCompilationInfo::kProfileVersionWithCounters[] = { '5', '0', '0', '\0' };
+
+static_assert(sizeof(ProfileCompilationInfo::kProfileVersion) == 4,
+              "Invalid profile version size");
+static_assert(sizeof(ProfileCompilationInfo::kProfileVersionWithCounters) == 4,
+              "Invalid profile version size");
 
 // The name of the profile entry in the dex metadata file.
 // DO NOT CHANGE THIS! (it's similar to classes.dex in the apk files).
@@ -84,18 +90,31 @@
   return kDebugIgnoreChecksum || dex_file_checksum == checksum;
 }
 
+// For storage efficiency we saturate aggregation counts at the uint16_t maximum (2^16 - 1).
+static uint16_t IncrementAggregationCounter(uint16_t counter, uint16_t value) {
+  if (counter < (std::numeric_limits<uint16_t>::max() - value)) {
+    return counter + value;
+  } else {
+    return std::numeric_limits<uint16_t>::max();
+  }
+}
+
 ProfileCompilationInfo::ProfileCompilationInfo(ArenaPool* custom_arena_pool)
     : default_arena_pool_(),
       allocator_(custom_arena_pool),
       info_(allocator_.Adapter(kArenaAllocProfile)),
-      profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)) {
+      profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)),
+      aggregation_count_(0) {
+  InitProfileVersionInternal(kProfileVersion);
 }
 
 ProfileCompilationInfo::ProfileCompilationInfo()
     : default_arena_pool_(),
       allocator_(&default_arena_pool_),
       info_(allocator_.Adapter(kArenaAllocProfile)),
-      profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)) {
+      profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)),
+      aggregation_count_(0) {
+  InitProfileVersionInternal(kProfileVersion);
 }
 
 ProfileCompilationInfo::~ProfileCompilationInfo() {
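
IncrementAggregationCounter is a saturating add: once the sum would reach the uint16_t ceiling it pins at 65535 rather than wrapping, so heavily merged boot-image profiles never silently reset a hot count. Edge cases:

    IncrementAggregationCounter(10, 5);      // 15
    IncrementAggregationCounter(65530, 10);  // 65535 -- saturates instead of wrapping
    IncrementAggregationCounter(65535, 1);   // 65535 -- stays pinned at the ceiling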
@@ -326,13 +345,15 @@
 /**
  * Serialization format:
  * [profile_header, zipped[[profile_line_header1, profile_line_header2...],[profile_line_data1,
- *    profile_line_data2...]]]
+ *    profile_line_data2...]],global_aggregation_counter]
  * profile_header:
  *   magic,version,number_of_dex_files,uncompressed_size_of_zipped_data,compressed_data_size
  * profile_line_header:
  *   dex_location,number_of_classes,methods_region_size,dex_location_checksum,num_method_ids
  * profile_line_data:
- *   method_encoding_1,method_encoding_2...,class_id1,class_id2...,startup/post startup bitmap
+ *   method_encoding_1,method_encoding_2...,class_id1,class_id2...,startup/post startup bitmap,
+ *   num_classes,class_counters,num_methods,method_counters
+ * The aggregation counters are only stored if the profile version is kProfileVersionWithCounters.
  * The method_encoding is:
  *    method_id,number_of_inline_caches,inline_cache1,inline_cache2...
  * The inline_cache is:
@@ -355,7 +376,7 @@
   if (!WriteBuffer(fd, kProfileMagic, sizeof(kProfileMagic))) {
     return false;
   }
-  if (!WriteBuffer(fd, kProfileVersion, sizeof(kProfileVersion))) {
+  if (!WriteBuffer(fd, version_, sizeof(version_))) {
     return false;
   }
   DCHECK_LE(info_.size(), std::numeric_limits<uint8_t>::max());
@@ -370,7 +391,17 @@
         sizeof(uint16_t) * dex_data.class_set.size() +
         methods_region_size +
         dex_data.bitmap_storage.size();
+    if (StoresAggregationCounters()) {
+      required_capacity += sizeof(uint16_t) +  // num class counters
+          sizeof(uint16_t) * dex_data.class_set.size() +
+          sizeof(uint16_t) +  // num method counters
+          sizeof(uint16_t) * dex_data_ptr->GetNumMethodCounters();
+    }
   }
+  if (StoresAggregationCounters()) {
+    required_capacity += sizeof(uint16_t);  // global counter
+  }
+
   // Allow large profiles for non target builds for the case where we are merging many profiles
   // to generate a boot image profile.
   if (kIsTargetBuild && required_capacity > kProfileSizeErrorThresholdInBytes) {
@@ -443,6 +474,24 @@
     buffer.insert(buffer.end(),
                   dex_data.bitmap_storage.begin(),
                   dex_data.bitmap_storage.end());
+
+    if (StoresAggregationCounters()) {
+      AddUintToBuffer(&buffer, static_cast<uint16_t>(dex_data.class_set.size()));
+      for (const auto& class_id : dex_data.class_set) {
+        uint16_t type_idx = class_id.index_;
+        AddUintToBuffer(&buffer, dex_data.class_counters[type_idx]);
+      }
+      AddUintToBuffer(&buffer, dex_data.GetNumMethodCounters());
+      for (uint16_t method_idx = 0; method_idx < dex_data.num_method_ids; method_idx++) {
+        if (dex_data.GetHotnessInfo(method_idx).IsInProfile()) {
+          AddUintToBuffer(&buffer, dex_data.method_counters[method_idx]);
+        }
+      }
+    }
+  }
+
+  if (StoresAggregationCounters()) {
+    AddUintToBuffer(&buffer, aggregation_count_);
   }
 
   uint32_t output_size = 0;
@@ -583,7 +632,8 @@
         profile_key,
         checksum,
         profile_index,
-        num_method_ids);
+        num_method_ids,
+        StoresAggregationCounters());
     info_.push_back(dex_file_data);
   }
   DexFileData* result = info_[profile_index];
@@ -943,7 +993,7 @@
   // Read magic and version
   const size_t kMagicVersionSize =
     sizeof(kProfileMagic) +
-    sizeof(kProfileVersion) +
+    kProfileVersionSize +
     sizeof(uint8_t) +  // number of dex files
     sizeof(uint32_t) +  // size of uncompressed profile data
     sizeof(uint32_t);  // size of compressed profile data
@@ -959,10 +1009,18 @@
     *error = "Profile missing magic";
     return kProfileLoadVersionMismatch;
   }
-  if (!safe_buffer.CompareAndAdvance(kProfileVersion, sizeof(kProfileVersion))) {
+  if (safe_buffer.CountUnreadBytes() < kProfileVersionSize) {
+    *error = "Cannot read profile version";
+    return kProfileLoadBadData;
+  }
+  memcpy(version_, safe_buffer.GetCurrentPtr(), kProfileVersionSize);
+  safe_buffer.Advance(kProfileVersionSize);
+  if ((memcmp(version_, kProfileVersion, kProfileVersionSize) != 0) &&
+      (memcmp(version_, kProfileVersionWithCounters, kProfileVersionSize) != 0)) {
     *error = "Profile version mismatch";
     return kProfileLoadVersionMismatch;
   }
+
   if (!safe_buffer.ReadUintAndAdvance<uint8_t>(number_of_dex_files)) {
     *error = "Cannot read the number of dex files";
     return kProfileLoadBadData;
@@ -1047,6 +1105,7 @@
     }
   }
 
+  // Read method bitmap.
   const size_t bytes = data->bitmap_storage.size();
   if (buffer.CountUnreadBytes() < bytes) {
     *error += "Profile EOF reached prematurely for ReadProfileHeaderDexLocation";
@@ -1055,10 +1114,51 @@
   const uint8_t* base_ptr = buffer.GetCurrentPtr();
   std::copy_n(base_ptr, bytes, data->bitmap_storage.data());
   buffer.Advance(bytes);
-  // Read method bitmap.
+
+  if (StoresAggregationCounters()) {
+    ReadAggregationCounters(buffer, *data, error);
+  }
+
   return kProfileLoadSuccess;
 }
 
+bool ProfileCompilationInfo::ReadAggregationCounters(
+      SafeBuffer& buffer,
+      DexFileData& dex_data,
+      /*out*/std::string* error) {
+  size_t unread_bytes_before_op = buffer.CountUnreadBytes();
+  size_t expected_byte_count = sizeof(uint16_t) *
+      (dex_data.class_set.size() + dex_data.method_map.size() + 2);
+  if (unread_bytes_before_op < expected_byte_count) {
+    *error += "Profile EOF reached prematurely for ReadAggregationCounters";
+    return false;
+  }
+
+  uint16_t num_class_counters;
+  READ_UINT(uint16_t, buffer, num_class_counters, error);
+  if (num_class_counters != dex_data.class_set.size()) {
+    *error = "Invalid class size when reading counters";
+    return false;
+  }
+  for (const auto& class_it : dex_data.class_set) {
+    READ_UINT(uint16_t, buffer, dex_data.class_counters[class_it.index_], error);
+  }
+
+  uint16_t num_method_counters;
+  READ_UINT(uint16_t, buffer, num_method_counters, error);
+  if (num_method_counters != dex_data.GetNumMethodCounters()) {
+    *error = "Invalid class size when reading counters";
+    return false;
+  }
+  for (uint16_t method_idx = 0; method_idx < dex_data.num_method_ids; method_idx++) {
+    if (dex_data.GetHotnessInfo(method_idx).IsInProfile()) {
+      READ_UINT(uint16_t, buffer, dex_data.method_counters[method_idx], error);
+    }
+  }
+
+  return true;
+}
+
 // TODO(calin): Fix this API. ProfileCompilationInfo::Load should be static and
 // return a unique pointer to a ProfileCompilationInfo upon success.
 bool ProfileCompilationInfo::Load(
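
Note the symmetry with the writer above: ReadAggregationCounters must consume exactly what Save() emitted, in the same order. As implied by the two loops, the per-dex-file counter block is laid out as:

    num_class_counters  : uint16_t  // must equal class_set.size()
    class_counter[...]  : uint16_t  // one per class, in class_set iteration order
    num_method_counters : uint16_t  // must equal GetNumMethodCounters()
    method_counter[...] : uint16_t  // one per in-profile method, by ascending method index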
@@ -1370,9 +1470,17 @@
     }
   }
 
+  if (StoresAggregationCounters()) {
+    if (!uncompressed_data.ReadUintAndAdvance<uint16_t>(&aggregation_count_)) {
+      *error = "Cannot read the global aggregation count";
+      return kProfileLoadBadData;
+    }
+  }
+
   // Check that we read everything and that profiles don't contain junk data.
   if (uncompressed_data.CountUnreadBytes() > 0) {
-    *error = "Unexpected content in the profile file";
+    *error = "Unexpected content in the profile file: " +
+        std::to_string(uncompressed_data.CountUnreadBytes()) + " extra bytes";
     return kProfileLoadBadData;
   } else {
     return kProfileLoadSuccess;
@@ -1518,6 +1626,33 @@
                                                                  other_dex_data->checksum));
     DCHECK(dex_data != nullptr);
 
+    // Merge counters for methods and class. Must be done before we merge the bitmaps so that
+    // we can tell if the data is new or not.
+    if (StoresAggregationCounters()) {
+      // Class aggregation counters.
+      if (merge_classes) {
+        for (const dex::TypeIndex& type_idx : other_dex_data->class_set) {
+          uint16_t amount = other.StoresAggregationCounters()
+              ? other_dex_data->class_counters[type_idx.index_]
+              : (dex_data->ContainsClass(type_idx) ? 1 : 0);
+
+          dex_data->class_counters[type_idx.index_] =
+              IncrementAggregationCounter(dex_data->class_counters[type_idx.index_], amount);
+        }
+      }
+
+      // Method aggregation counters.
+      for (uint16_t method_idx = 0; method_idx < other_dex_data->num_method_ids; method_idx++) {
+        if (other_dex_data->GetHotnessInfo(method_idx).IsInProfile()) {
+          uint16_t amount = other.StoresAggregationCounters()
+              ? other_dex_data->method_counters[method_idx]
+              : (dex_data->GetHotnessInfo(method_idx).IsInProfile() ? 1 : 0);
+          dex_data->method_counters[method_idx] =
+              IncrementAggregationCounter(dex_data->method_counters[method_idx], amount);
+        }
+      }
+    }
+
     // Merge the classes.
     if (merge_classes) {
       dex_data->class_set.insert(other_dex_data->class_set.begin(),
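
The merge rule treats a counter-less peer as a single observation: when the other profile stores counters its values are added directly, otherwise the increment is 1 for an entry this profile already knows about and 0 for a first sighting (which is why the counters must be merged before the class sets and bitmaps). In schematic form:

    // Merging plain profile P into aggregating profile A:
    //   entry in P, already in A -> A.counter += 1   (seen again)
    //   entry in P, new to A     -> A.counter += 0   (first sighting starts at 0)
    // Merging aggregating profile B into A:
    //   A.counter = IncrementAggregationCounter(A.counter, B.counter)  // saturating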
@@ -1552,6 +1687,13 @@
     // Merge the method bitmaps.
     dex_data->MergeBitmap(*other_dex_data);
   }
+
+  // Global aggregation counter.
+  if (StoresAggregationCounters()) {
+    uint16_t amount = other.StoresAggregationCounters() ? other.aggregation_count_ : 1;
+    aggregation_count_ = IncrementAggregationCounter(aggregation_count_, amount);
+  }
+
   return true;
 }
 
@@ -1614,11 +1756,7 @@
 
 bool ProfileCompilationInfo::ContainsClass(const DexFile& dex_file, dex::TypeIndex type_idx) const {
   const DexFileData* dex_data = FindDexData(&dex_file);
-  if (dex_data != nullptr) {
-    const ArenaSet<dex::TypeIndex>& classes = dex_data->class_set;
-    return classes.find(type_idx) != classes.end();
-  }
-  return false;
+  return (dex_data != nullptr) && dex_data->ContainsClass(type_idx);
 }
 
 uint32_t ProfileCompilationInfo::GetNumberOfMethods() const {
@@ -1753,6 +1891,9 @@
 bool ProfileCompilationInfo::Equals(const ProfileCompilationInfo& other) {
   // No need to compare profile_key_map_. That's only a cache for fast search.
   // All the information is already in the info_ vector.
+  if (memcmp(version_, other.version_, kProfileVersionSize) != 0) {
+    return false;
+  }
   if (info_.size() != other.info_.size()) {
     return false;
   }
@@ -1763,6 +1904,9 @@
       return false;
     }
   }
+  if (aggregation_count_ != other.aggregation_count_) {
+    return false;
+  }
   return true;
 }
 
@@ -1965,9 +2109,8 @@
   SetMethodHotness(index, flags);
 
   if ((flags & MethodHotness::kFlagHot) != 0) {
-    method_map.FindOrAdd(
-        index,
-        InlineCacheMap(std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)));
+    ProfileCompilationInfo::InlineCacheMap* result = FindOrAddMethod(index);
+    DCHECK(result != nullptr);
   }
   return true;
 }
@@ -2000,6 +2143,43 @@
   return ret;
 }
 
+int32_t ProfileCompilationInfo::DexFileData::GetMethodAggregationCounter(
+      uint16_t method_idx) const {
+  CHECK_GT(method_counters.size(), method_idx) << "Profile not prepared for aggregation counters";
+  if (!GetHotnessInfo(method_idx).IsInProfile()) {
+    return -1;
+  }
+
+  return method_counters[method_idx];
+}
+
+int32_t ProfileCompilationInfo::DexFileData::GetClassAggregationCounter(uint16_t type_idx) const {
+  CHECK_GT(class_counters.size(), type_idx) << "Profile not prepared for aggregation counters";
+  if (!ContainsClass(dex::TypeIndex(type_idx))) {
+    return -1;
+  }
+
+  return class_counters[type_idx];
+}
+
+int32_t ProfileCompilationInfo::GetMethodAggregationCounter(
+      const MethodReference& method_ref) const {
+  CHECK(StoresAggregationCounters()) << "Profile not prepared for aggregation counters";
+  const DexFileData* dex_data = FindDexData(method_ref.dex_file);
+  return dex_data == nullptr ? -1 : dex_data->GetMethodAggregationCounter(method_ref.index);
+}
+
+int32_t ProfileCompilationInfo::GetClassAggregationCounter(const TypeReference& type_ref) const {
+  CHECK(StoresAggregationCounters()) << "Profile not prepared for aggregation counters";
+  const DexFileData* dex_data = FindDexData(type_ref.dex_file);
+  return dex_data == nullptr ? -1 : dex_data->GetClassAggregationCounter(type_ref.index);
+}
+
+uint16_t ProfileCompilationInfo::GetAggregationCounter() const {
+  CHECK(StoresAggregationCounters()) << "Profile not prepared for aggregation counters";
+  return aggregation_count_;
+}
+
 ProfileCompilationInfo::DexPcData*
 ProfileCompilationInfo::FindOrAddDexPc(InlineCacheMap* inline_cache, uint32_t dex_pc) {
   return &(inline_cache->FindOrAdd(dex_pc, DexPcData(&allocator_))->second);
@@ -2096,4 +2276,46 @@
   profile_key_map_.clear();
 }
 
+bool ProfileCompilationInfo::StoresAggregationCounters() const {
+  return memcmp(version_, kProfileVersionWithCounters, sizeof(kProfileVersionWithCounters)) == 0;
+}
+
+void ProfileCompilationInfo::PrepareForAggregationCounters() {
+  InitProfileVersionInternal(kProfileVersionWithCounters);
+  for (DexFileData* dex_data : info_) {
+    dex_data->PrepareForAggregationCounters();
+  }
+}
+
+void ProfileCompilationInfo::DexFileData::PrepareForAggregationCounters() {
+  method_counters.resize(num_method_ids);
+  // TODO(calin): we should store the maximum number of types in the profile.
+  // It will simplify quite a few things and make this storage allocation
+  // more efficient.
+  size_t max_elems = 1 << (kBitsPerByte * sizeof(uint16_t));
+  class_counters.resize(max_elems);
+}
+
+const uint8_t* ProfileCompilationInfo::GetVersion() const {
+  return version_;
+}
+
+void ProfileCompilationInfo::InitProfileVersionInternal(const uint8_t version[]) {
+  CHECK(
+      (memcmp(version, kProfileVersion, kProfileVersionSize) == 0) ||
+      (memcmp(version, kProfileVersionWithCounters, kProfileVersionSize) == 0));
+  memcpy(version_, version, kProfileVersionSize);
+}
+
+uint16_t ProfileCompilationInfo::DexFileData::GetNumMethodCounters() const {
+  uint16_t num_method_counters = 0;
+  for (uint16_t method_idx = 0; method_idx < num_method_ids; method_idx++) {
+    num_method_counters += GetHotnessInfo(method_idx).IsInProfile() ? 1 : 0;
+  }
+  return num_method_counters;
+}
+
+bool ProfileCompilationInfo::DexFileData::ContainsClass(const dex::TypeIndex type_index) const {
+  return class_set.find(type_index) != class_set.end();
+}
 }  // namespace art
diff --git a/libprofile/profile/profile_compilation_info.h b/libprofile/profile/profile_compilation_info.h
index 92fa098..fa4615b 100644
--- a/libprofile/profile/profile_compilation_info.h
+++ b/libprofile/profile/profile_compilation_info.h
@@ -73,9 +73,10 @@
  public:
   static const uint8_t kProfileMagic[];
   static const uint8_t kProfileVersion[];
-
+  static const uint8_t kProfileVersionWithCounters[];
   static const char kDexMetadataProfileEntry[];
 
+  static constexpr size_t kProfileVersionSize = 4;
   static constexpr uint8_t kIndividualInlineCacheSize = 5;
 
   // Data structures for encoding the offline representation of inline caches.
@@ -447,6 +448,30 @@
   // Clears all the data from the profile.
   void ClearData();
 
+  // Prepare the profile to store aggregation counters.
+  // This will change the profile version and allocate extra storage for the counters.
+  // It allocates 2 bytes for every possible method and class, so do not use in performance
+  // critical code which needs to be memory efficient.
+  void PrepareForAggregationCounters();
+
+  // Returns true if the profile is configured to store aggregation counters.
+  bool StoresAggregationCounters() const;
+
+  // Returns the aggregation counter for the given method.
+  // Returns -1 if the method is not in the profile.
+  // CHECKs that the profile is configured to store aggregation counters.
+  int32_t GetMethodAggregationCounter(const MethodReference& method_ref) const;
+  // Returns the aggregation counter for the given class.
+  // Returns -1 if the class is not in the profile.
+  // CHECKs that the profile is configured to store aggregation counters.
+  int32_t GetClassAggregationCounter(const TypeReference& type_ref) const;
+  // Returns the number of times the profile was merged.
+  // CHECKs that the profile is configured to store aggregation counters.
+  uint16_t GetAggregationCounter() const;
+
+  // Return the version of this profile.
+  const uint8_t* GetVersion() const;
+
  private:
   enum ProfileLoadStatus {
     kProfileLoadWouldOverwiteData,
@@ -470,7 +495,8 @@
                 const std::string& key,
                 uint32_t location_checksum,
                 uint16_t index,
-                uint32_t num_methods)
+                uint32_t num_methods,
+                bool store_aggregation_counters)
         : allocator_(allocator),
           profile_key(key),
           profile_index(index),
@@ -478,13 +504,18 @@
           method_map(std::less<uint16_t>(), allocator->Adapter(kArenaAllocProfile)),
           class_set(std::less<dex::TypeIndex>(), allocator->Adapter(kArenaAllocProfile)),
           num_method_ids(num_methods),
-          bitmap_storage(allocator->Adapter(kArenaAllocProfile)) {
+          bitmap_storage(allocator->Adapter(kArenaAllocProfile)),
+          method_counters(allocator->Adapter(kArenaAllocProfile)),
+          class_counters(allocator->Adapter(kArenaAllocProfile)) {
       bitmap_storage.resize(ComputeBitmapStorage(num_method_ids));
       if (!bitmap_storage.empty()) {
         method_bitmap =
             BitMemoryRegion(MemoryRegion(
                 &bitmap_storage[0], bitmap_storage.size()), 0, ComputeBitmapBits(num_method_ids));
       }
+      if (store_aggregation_counters) {
+        PrepareForAggregationCounters();
+      }
     }
 
     static size_t ComputeBitmapBits(uint32_t num_method_ids) {
@@ -495,7 +526,13 @@
     }
 
     bool operator==(const DexFileData& other) const {
-      return checksum == other.checksum && method_map == other.method_map;
+      return checksum == other.checksum &&
+          num_method_ids == other.num_method_ids &&
+          method_map == other.method_map &&
+          class_set == other.class_set &&
+          (BitMemoryRegion::Compare(method_bitmap, other.method_bitmap) == 0) &&
+          class_counters == other.class_counters &&
+          method_counters == other.method_counters;
     }
 
     // Mark a method as executed at least once.
@@ -510,6 +547,14 @@
 
     void SetMethodHotness(size_t index, MethodHotness::Flag flags);
     MethodHotness GetHotnessInfo(uint32_t dex_method_index) const;
+    void PrepareForAggregationCounters();
+
+    int32_t GetMethodAggregationCounter(uint16_t method_index) const;
+    int32_t GetClassAggregationCounter(uint16_t type_index) const;
+
+    uint16_t GetNumMethodCounters() const;
+
+    bool ContainsClass(const dex::TypeIndex type_index) const;
 
     // The allocator used to allocate new inline cache maps.
     ArenaAllocator* const allocator_;
@@ -519,7 +564,7 @@
     uint8_t profile_index;
     // The dex checksum.
     uint32_t checksum;
-    // The methonds' profile information.
+    // The methods' profile information.
     MethodMap method_map;
     // The classes which have been profiled. Note that these don't necessarily include
     // all the classes that can be found in the inline caches reference.
@@ -531,6 +576,8 @@
     uint32_t num_method_ids;
     ArenaVector<uint8_t> bitmap_storage;
     BitMemoryRegion method_bitmap;
+    ArenaVector<uint16_t> method_counters;
+    ArenaVector<uint16_t> class_counters;
 
    private:
     enum BitmapIndex {
@@ -761,6 +808,11 @@
                    const SafeMap<uint8_t, uint8_t>& dex_profile_index_remap,
                    /*out*/std::string* error);
 
+  // Read the aggregation counters from the buffer.
+  bool ReadAggregationCounters(SafeBuffer& buffer,
+                               DexFileData& dex_data,
+                               /*out*/std::string* error);
+
   // The method generates mapping of profile indices while merging a new profile
   // data into current data. It returns true, if the mapping was successful.
   bool RemapProfileIndex(const std::vector<ProfileLineHeader>& profile_line_headers,
@@ -792,6 +844,9 @@
   // if no previous data exists.
   DexPcData* FindOrAddDexPc(InlineCacheMap* inline_cache, uint32_t dex_pc);
 
+  // Initializes the profile version to the desired one.
+  void InitProfileVersionInternal(const uint8_t version[]);
+
   friend class ProfileCompilationInfoTest;
   friend class CompilerDriverProfileTest;
   friend class ProfileAssistantTest;
@@ -809,6 +864,14 @@
   // This is used to speed up searches since it avoids iterating
   // over the info_ vector when searching by profile key.
   ArenaSafeMap<const std::string, uint8_t> profile_key_map_;
+
+  // The version of the profile.
+  // This may change if a "normal" profile is transformed to keep track
+  // of aggregation counters.
+  uint8_t version_[kProfileVersionSize];
+
+  // Stored only when the profile is configured to keep track of aggregation counters.
+  uint16_t aggregation_count_;
 };
 
 }  // namespace art
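
The memory warning in the header is easy to quantify from DexFileData::PrepareForAggregationCounters above: class_counters is sized for every representable type index, independent of how many classes were actually profiled.

    // Per dex file, once prepared for aggregation counters:
    //   class_counters : 2 bytes * 2^16 possible type indices = 128 KiB, always
    //   method_counters: 2 bytes * num_method_ids (e.g. ~120 KB for a 60k-method dex)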
diff --git a/libprofile/profile/profile_compilation_info_test.cc b/libprofile/profile/profile_compilation_info_test.cc
index a2bfe50..47019c4 100644
--- a/libprofile/profile/profile_compilation_info_test.cc
+++ b/libprofile/profile/profile_compilation_info_test.cc
@@ -1141,4 +1141,180 @@
   ASSERT_TRUE(loaded_info.Equals(info));
 }
 
+TEST_F(ProfileCompilationInfoTest, PrepareForAggregationCounters) {
+  ProfileCompilationInfo info;
+  ASSERT_EQ(
+      memcmp(info.GetVersion(),
+             ProfileCompilationInfo::kProfileVersion,
+             ProfileCompilationInfo::kProfileVersionSize),
+      0);
+
+  info.PrepareForAggregationCounters();
+
+  ASSERT_EQ(
+      memcmp(info.GetVersion(),
+             ProfileCompilationInfo::kProfileVersionWithCounters,
+             ProfileCompilationInfo::kProfileVersionSize),
+      0);
+  ASSERT_TRUE(info.StoresAggregationCounters());
+  ASSERT_EQ(info.GetAggregationCounter(), 0);
+}
+
+TEST_F(ProfileCompilationInfoTest, MergeWithAggregationCounters) {
+  ProfileCompilationInfo info1;
+  info1.PrepareForAggregationCounters();
+
+  ProfileCompilationInfo info2;
+  ProfileCompilationInfo info3;
+
+  std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+  std::string location = dex->GetLocation();
+  int checksum = dex->GetLocationChecksum();
+
+  AddMethod(location, checksum, /* method_idx= */ 1, &info1);
+
+  AddMethod(location, checksum, /* method_idx= */ 2, &info1);
+  AddMethod(location, checksum, /* method_idx= */ 2, &info2);
+
+  info1.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
+  info2.AddMethodIndex(Hotness::kFlagPostStartup, location, checksum, 3, kMaxMethodIds);
+  info3.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
+
+  AddMethod(location, checksum, /* method_idx= */ 6, &info2);
+  AddMethod(location, checksum, /* method_idx= */ 6, &info3);
+
+  AddClass(location, checksum, dex::TypeIndex(10), &info1);
+
+  AddClass(location, checksum, dex::TypeIndex(20), &info1);
+  AddClass(location, checksum, dex::TypeIndex(20), &info2);
+
+  AddClass(location, checksum, dex::TypeIndex(30), &info1);
+  AddClass(location, checksum, dex::TypeIndex(30), &info2);
+  AddClass(location, checksum, dex::TypeIndex(30), &info3);
+
+  ASSERT_EQ(info1.GetAggregationCounter(), 0);
+  info1.MergeWith(info2);
+  ASSERT_EQ(info1.GetAggregationCounter(), 1);
+  info1.MergeWith(info3);
+  ASSERT_EQ(info1.GetAggregationCounter(), 2);
+
+  ASSERT_EQ(0, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 1)));
+  ASSERT_EQ(1, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 2)));
+  ASSERT_EQ(2, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 3)));
+  ASSERT_EQ(1, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 6)));
+
+  ASSERT_EQ(0, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(10))));
+  ASSERT_EQ(1, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(20))));
+  ASSERT_EQ(2, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(30))));
+
+  // Check methods that do not exist.
+  ASSERT_EQ(-1, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 4)));
+  ASSERT_EQ(-1, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(40))));
+}
+
+TEST_F(ProfileCompilationInfoTest, SaveAndLoadAggregationCounters) {
+  ProfileCompilationInfo info1;
+  info1.PrepareForAggregationCounters();
+
+  ProfileCompilationInfo info2;
+  ProfileCompilationInfo info3;
+
+  std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+  std::string location = dex->GetLocation();
+  int checksum = dex->GetLocationChecksum();
+
+  AddMethod(location, checksum, /* method_idx= */ 1, &info1);
+
+  AddMethod(location, checksum, /* method_idx= */ 2, &info1);
+  AddMethod(location, checksum, /* method_idx= */ 2, &info2);
+
+  info1.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
+  info2.AddMethodIndex(Hotness::kFlagPostStartup, location, checksum, 3, kMaxMethodIds);
+  info3.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
+
+  AddMethod(location, checksum, /* method_idx= */ 6, &info2);
+  AddMethod(location, checksum, /* method_idx= */ 6, &info3);
+
+  AddClass(location, checksum, dex::TypeIndex(10), &info1);
+
+  AddClass(location, checksum, dex::TypeIndex(20), &info1);
+  AddClass(location, checksum, dex::TypeIndex(20), &info2);
+
+  AddClass(location, checksum, dex::TypeIndex(30), &info1);
+  AddClass(location, checksum, dex::TypeIndex(30), &info2);
+  AddClass(location, checksum, dex::TypeIndex(30), &info3);
+
+  info1.MergeWith(info2);
+  info1.MergeWith(info3);
+
+  ScratchFile profile;
+
+  ASSERT_TRUE(info1.Save(GetFd(profile)));
+  ASSERT_EQ(0, profile.GetFile()->Flush());
+
+  // Check that we get back what we saved.
+  ProfileCompilationInfo loaded_info;
+  loaded_info.PrepareForAggregationCounters();
+  ASSERT_TRUE(profile.GetFile()->ResetOffset());
+  ASSERT_TRUE(loaded_info.Load(GetFd(profile)));
+  ASSERT_TRUE(loaded_info.Equals(info1));
+
+  ASSERT_EQ(2, loaded_info.GetAggregationCounter());
+
+  ASSERT_EQ(0, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 1)));
+  ASSERT_EQ(1, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 2)));
+  ASSERT_EQ(2, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 3)));
+  ASSERT_EQ(1, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 6)));
+
+  ASSERT_EQ(0, loaded_info.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(10))));
+  ASSERT_EQ(1, loaded_info.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(20))));
+  ASSERT_EQ(2, loaded_info.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(30))));
+}
+
+TEST_F(ProfileCompilationInfoTest, MergeTwoWithAggregationCounters) {
+  ProfileCompilationInfo info1;
+  info1.PrepareForAggregationCounters();
+
+  ProfileCompilationInfo info2;
+
+  std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+  std::string location = dex->GetLocation();
+  int checksum = dex->GetLocationChecksum();
+
+  AddMethod(location, checksum, /* method_idx= */ 1, &info1);
+
+  AddMethod(location, checksum, /* method_idx= */ 2, &info1);
+  AddMethod(location, checksum, /* method_idx= */ 2, &info2);
+
+  AddClass(location, checksum, dex::TypeIndex(20), &info1);
+
+  AddClass(location, checksum, dex::TypeIndex(10), &info1);
+  AddClass(location, checksum, dex::TypeIndex(10), &info2);
+
+  info1.MergeWith(info2);
+  info1.MergeWith(info2);
+  ASSERT_EQ(2, info1.GetAggregationCounter());
+
+  // Save and load the profile to create a copy of the data
+  ScratchFile profile;
+  info1.Save(GetFd(profile));
+  ASSERT_EQ(0, profile.GetFile()->Flush());
+
+  ProfileCompilationInfo loaded_info;
+  loaded_info.PrepareForAggregationCounters();
+  profile.GetFile()->ResetOffset();
+  loaded_info.Load(GetFd(profile));
+
+  // Merge the data
+  info1.MergeWith(loaded_info);
+
+  ASSERT_EQ(4, info1.GetAggregationCounter());
+
+  ASSERT_EQ(0, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 1)));
+  ASSERT_EQ(4, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 2)));
+
+  ASSERT_EQ(4, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(10))));
+  ASSERT_EQ(0, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(20))));
+}
+
 }  // namespace art
diff --git a/profman/profile_assistant.cc b/profman/profile_assistant.cc
index 4dc5262..b65bb43 100644
--- a/profman/profile_assistant.cc
+++ b/profman/profile_assistant.cc
@@ -32,7 +32,8 @@
 ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
         const std::vector<ScopedFlock>& profile_files,
         const ScopedFlock& reference_profile_file,
-        const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
+        const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
+        bool store_aggregation_counters) {
   DCHECK(!profile_files.empty());
 
   ProfileCompilationInfo info;
@@ -42,6 +43,12 @@
     return kErrorBadProfiles;
   }
 
+  // If we need to store aggregation counters (e.g. for the boot image profile),
+  // prepare the reference profile now.
+  if (store_aggregation_counters) {
+    info.PrepareForAggregationCounters();
+  }
+
   // Store the current state of the reference profile before merging with the current profiles.
   uint32_t number_of_methods = info.GetNumberOfMethods();
   uint32_t number_of_classes = info.GetNumberOfResolvedClasses();
@@ -124,7 +131,8 @@
 ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
         const std::vector<int>& profile_files_fd,
         int reference_profile_file_fd,
-        const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
+        const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
+        bool store_aggregation_counters) {
   DCHECK_GE(reference_profile_file_fd, 0);
 
   std::string error;
@@ -147,13 +155,15 @@
 
   return ProcessProfilesInternal(profile_files.Get(),
                                  reference_profile_file,
-                                 filter_fn);
+                                 filter_fn,
+                                 store_aggregation_counters);
 }
 
 ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
         const std::vector<std::string>& profile_files,
         const std::string& reference_profile_file,
-        const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
+        const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
+        bool store_aggregation_counters) {
   std::string error;
 
   ScopedFlockList profile_files_list(profile_files.size());
@@ -171,7 +181,8 @@
 
   return ProcessProfilesInternal(profile_files_list.Get(),
                                  locked_reference_profile_file,
-                                 filter_fn);
+                                 filter_fn,
+                                 store_aggregation_counters);
 }
 
 }  // namespace art
diff --git a/profman/profile_assistant.h b/profman/profile_assistant.h
index c1d6f8e..45d4e38 100644
--- a/profman/profile_assistant.h
+++ b/profman/profile_assistant.h
@@ -55,19 +55,22 @@
       const std::vector<std::string>& profile_files,
       const std::string& reference_profile_file,
       const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
-          = ProfileCompilationInfo::ProfileFilterFnAcceptAll);
+          = ProfileCompilationInfo::ProfileFilterFnAcceptAll,
+      bool store_aggregation_counters = false);
 
   static ProcessingResult ProcessProfiles(
       const std::vector<int>& profile_files_fd_,
       int reference_profile_file_fd,
       const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
-          = ProfileCompilationInfo::ProfileFilterFnAcceptAll);
+          = ProfileCompilationInfo::ProfileFilterFnAcceptAll,
+      bool store_aggregation_counters = false);
 
  private:
   static ProcessingResult ProcessProfilesInternal(
       const std::vector<ScopedFlock>& profile_files,
       const ScopedFlock& reference_profile_file,
-      const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn);
+      const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
+      bool store_aggregation_counters);
 
   DISALLOW_COPY_AND_ASSIGN(ProfileAssistant);
 };
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index e9d3290..e906151 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -102,7 +102,7 @@
       }
     }
     for (uint16_t i = 0; i < number_of_classes; i++) {
-      ASSERT_TRUE(info->AddClassIndex(dex_location1,
+      ASSERT_TRUE(info->AddClassIndex(ProfileCompilationInfo::GetProfileDexFileKey(dex_location1),
                                       dex_location_checksum1,
                                       dex::TypeIndex(i),
                                       number_of_methods1));
@@ -1300,4 +1300,57 @@
   }
 }
 
+TEST_F(ProfileAssistantTest, MergeProfilesWithCounters) {
+  ScratchFile profile1;
+  ScratchFile profile2;
+  ScratchFile reference_profile;
+
+  // The new profile info will contain methods with indices 0-99.
+  const uint16_t kNumberOfMethodsToEnableCompilation = 100;
+  const uint16_t kNumberOfClasses = 50;
+
+  std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("ProfileTestMultiDex");
+  const DexFile& d1 = *dex_files[0];
+  const DexFile& d2 = *dex_files[1];
+  ProfileCompilationInfo info1;
+  SetupProfile(
+      d1.GetLocation(), d1.GetLocationChecksum(),
+      d2.GetLocation(), d2.GetLocationChecksum(),
+      kNumberOfMethodsToEnableCompilation, kNumberOfClasses, profile1, &info1);
+  ProfileCompilationInfo info2;
+  SetupProfile(
+      d1.GetLocation(), d1.GetLocationChecksum(),
+      d2.GetLocation(), d2.GetLocationChecksum(),
+      kNumberOfMethodsToEnableCompilation, kNumberOfClasses, profile2, &info2);
+
+  std::string profman_cmd = GetProfmanCmd();
+  std::vector<std::string> argv_str;
+  argv_str.push_back(profman_cmd);
+  argv_str.push_back("--profile-file-fd=" + std::to_string(profile1.GetFd()));
+  argv_str.push_back("--profile-file-fd=" + std::to_string(profile2.GetFd()));
+  argv_str.push_back("--reference-profile-file-fd=" + std::to_string(reference_profile.GetFd()));
+  argv_str.push_back("--store-aggregation-counters");
+  std::string error;
+
+  EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0) << error;
+
+  // Verify that we can load the result and that the counters are in place.
+
+  ProfileCompilationInfo result;
+  result.PrepareForAggregationCounters();
+  ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
+  ASSERT_TRUE(result.Load(reference_profile.GetFd()));
+
+  ASSERT_TRUE(result.StoresAggregationCounters());
+  ASSERT_EQ(2, result.GetAggregationCounter());
+
+  for (uint16_t i = 0; i < kNumberOfMethodsToEnableCompilation; i++) {
+    ASSERT_EQ(1, result.GetMethodAggregationCounter(MethodReference(&d1, i)));
+    ASSERT_EQ(1, result.GetMethodAggregationCounter(MethodReference(&d2, i)));
+  }
+  for (uint16_t i = 0; i < kNumberOfClasses; i++) {
+    ASSERT_EQ(1, result.GetClassAggregationCounter(TypeReference(&d1, dex::TypeIndex(i))));
+  }
+}
+
 }  // namespace art
diff --git a/profman/profman.cc b/profman/profman.cc
index 2935a05..a0c387d 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -157,6 +157,9 @@
   UsageError("      the file passed with --profile-fd(file) to the profile passed with");
   UsageError("      --reference-profile-fd(file) and update at the same time the profile-key");
   UsageError("      of entries corresponding to the apks passed with --apk(-fd).");
+  UsageError("  --store-aggregation-counters: if present, profman will compute and store");
+  UsageError("      the aggregation counters of classes and methods in the output profile.");
+  UsageError("      In this case the profile will have a different version.");
   UsageError("");
 
   exit(EXIT_FAILURE);
@@ -200,7 +203,8 @@
       test_profile_class_percentage_(kDefaultTestProfileClassPercentage),
       test_profile_seed_(NanoTime()),
       start_ns_(NanoTime()),
-      copy_and_update_profile_key_(false) {}
+      copy_and_update_profile_key_(false),
+      store_aggregation_counters_(false) {}
 
   ~ProfMan() {
     LogCompletionTime();
@@ -287,6 +291,8 @@
         ParseUintOption(option, "--generate-test-profile-seed", &test_profile_seed_, Usage);
       } else if (option.starts_with("--copy-and-update-profile-key")) {
         copy_and_update_profile_key_ = true;
+      } else if (option.starts_with("--store-aggregation-counters")) {
+        store_aggregation_counters_ = true;
       } else {
         Usage("Unknown argument '%s'", option.data());
       }
@@ -363,12 +369,14 @@
       File file(reference_profile_file_fd_, false);
       result = ProfileAssistant::ProcessProfiles(profile_files_fd_,
                                                  reference_profile_file_fd_,
-                                                 filter_fn);
+                                                 filter_fn,
+                                                 store_aggregation_counters_);
       CloseAllFds(profile_files_fd_, "profile_files_fd_");
     } else {
       result = ProfileAssistant::ProcessProfiles(profile_files_,
                                                  reference_profile_file_,
-                                                 filter_fn);
+                                                 filter_fn,
+                                                 store_aggregation_counters_);
     }
     return result;
   }
@@ -1279,6 +1287,7 @@
   uint32_t test_profile_seed_;
   uint64_t start_ns_;
   bool copy_and_update_profile_key_;
+  bool store_aggregation_counters_;
 };
 
 // See ProfileAssistant::ProcessingResult for return codes.
diff --git a/runtime/base/locks.cc b/runtime/base/locks.cc
index cfc9f1d..a7922a2 100644
--- a/runtime/base/locks.cc
+++ b/runtime/base/locks.cc
@@ -61,6 +61,7 @@
 Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
 Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
 Mutex* Locks::runtime_shutdown_lock_ = nullptr;
+Mutex* Locks::runtime_thread_pool_lock_ = nullptr;
 Mutex* Locks::cha_lock_ = nullptr;
 Mutex* Locks::subtype_check_lock_ = nullptr;
 Mutex* Locks::thread_list_lock_ = nullptr;
@@ -154,6 +155,7 @@
     DCHECK(user_code_suspension_lock_ != nullptr);
     DCHECK(dex_lock_ != nullptr);
     DCHECK(native_debug_interface_lock_ != nullptr);
+    DCHECK(runtime_thread_pool_lock_ != nullptr);
   } else {
     // Create global locks in level order from highest lock level to lowest.
     LockLevel current_lock_level = kInstrumentEntrypointsLock;
@@ -189,6 +191,10 @@
     DCHECK(runtime_shutdown_lock_ == nullptr);
     runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
 
+    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeThreadPoolLock);
+    DCHECK(runtime_thread_pool_lock_ == nullptr);
+    runtime_thread_pool_lock_ = new Mutex("runtime thread pool lock", current_lock_level);
+
     UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
     DCHECK(profiler_lock_ == nullptr);
     profiler_lock_ = new Mutex("profiler lock", current_lock_level);
diff --git a/runtime/base/locks.h b/runtime/base/locks.h
index 8cbe372..57719f1 100644
--- a/runtime/base/locks.h
+++ b/runtime/base/locks.h
@@ -117,6 +117,7 @@
   kJdwpEventListLock,
   kJdwpAttachLock,
   kJdwpStartLock,
+  kRuntimeThreadPoolLock,
   kRuntimeShutdownLock,
   kTraceLock,
   kHeapBitmapLock,
@@ -224,8 +225,11 @@
   // Guards shutdown of the runtime.
   static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
 
+  // Runtime thread pool lock.
+  static Mutex* runtime_thread_pool_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+
   // Guards background profiler global state.
-  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_thread_pool_lock_);
 
   // Guards trace (ie traceview) requests.
   static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bf8aaae..8f9967f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1416,6 +1416,11 @@
   TrimSpaces(self);
   // Trim arenas that may have been used by JIT or verifier.
   runtime->GetArenaPool()->TrimMaps();
+  {
+    // TODO: Move this to a callback called when startup is finished (b/120671223).
+    ScopedTrace trace2("Delete thread pool");
+    runtime->DeleteThreadPool();
+  }
 }
 
 class TrimIndirectReferenceTableClosure : public Closure {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 66db063..9080ed6 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -21,7 +21,6 @@
 #include <unistd.h>
 
 #include <random>
-#include <thread>
 
 #include "android-base/stringprintf.h"
 #include "android-base/strings.h"
@@ -685,40 +684,12 @@
       REQUIRES_SHARED(Locks::mutator_lock_) {
     TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
 
-    std::unique_ptr<ThreadPool> thread_pool;
     std::unique_ptr<ImageSpace> space = Init(image_filename,
                                              image_location,
                                              oat_file,
                                              &logger,
-                                             &thread_pool,
                                              image_reservation,
                                              error_msg);
-    if (thread_pool != nullptr) {
-      // Delay the thread pool deletion to prevent the deletion slowing down the startup by causing
-      // preemption. TODO: Just do this in heap trim.
-      static constexpr uint64_t kThreadPoolDeleteDelay = MsToNs(5000);
-
-      class DeleteThreadPoolTask : public HeapTask {
-       public:
-        explicit DeleteThreadPoolTask(std::unique_ptr<ThreadPool>&& thread_pool)
-            : HeapTask(NanoTime() + kThreadPoolDeleteDelay), thread_pool_(std::move(thread_pool)) {}
-
-        void Run(Thread* self) override {
-          ScopedTrace trace("DestroyThreadPool");
-          ScopedThreadStateChange stsc(self, kNative);
-          thread_pool_.reset();
-        }
-
-       private:
-        std::unique_ptr<ThreadPool> thread_pool_;
-      };
-      gc::TaskProcessor* const processor = Runtime::Current()->GetHeap()->GetTaskProcessor();
-      // The thread pool is already done being used since Init has finished running. Deleting the
-      // thread pool is done async since it takes a non-trivial amount of time to do.
-      if (processor != nullptr) {
-        processor->AddTask(Thread::Current(), new DeleteThreadPoolTask(std::move(thread_pool)));
-      }
-    }
     if (space != nullptr) {
       uint32_t expected_reservation_size =
           RoundUp(space->GetImageHeader().GetImageSize(), kPageSize);
@@ -779,7 +750,6 @@
                                           const char* image_location,
                                           const OatFile* oat_file,
                                           TimingLogger* logger,
-                                          std::unique_ptr<ThreadPool>* thread_pool,
                                           /*inout*/MemMap* image_reservation,
                                           /*out*/std::string* error_msg)
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -856,18 +826,6 @@
       return nullptr;
     }
 
-    const size_t kMinBlocks = 2;
-    if (thread_pool != nullptr && image_header->GetBlockCount() >= kMinBlocks) {
-      TimingLogger::ScopedTiming timing("CreateThreadPool", logger);
-      ScopedThreadStateChange stsc(Thread::Current(), kNative);
-      constexpr size_t kStackSize = 64 * KB;
-      constexpr size_t kMaxRuntimeWorkers = 4u;
-      const size_t num_workers =
-          std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers);
-      thread_pool->reset(new ThreadPool("Image", num_workers, /*create_peers=*/false, kStackSize));
-      thread_pool->get()->StartWorkers(Thread::Current());
-    }
-
     // GetImageBegin is the preferred address to map the image. If we manage to map the
     // image at the image begin, the amount of fixup work required is minimized.
     // If it is pic we will retry with error_msg for the failure case. Pass a null error_msg to
@@ -880,7 +838,6 @@
         *image_header,
         file->Fd(),
         logger,
-        thread_pool != nullptr ? thread_pool->get() : nullptr,
         image_reservation,
         error_msg);
     if (!map.IsValid()) {
@@ -971,7 +928,6 @@
                               const ImageHeader& image_header,
                               int fd,
                               TimingLogger* logger,
-                              ThreadPool* pool,
                               /*inout*/MemMap* image_reservation,
                               /*out*/std::string* error_msg) {
     TimingLogger::ScopedTiming timing("MapImageFile", logger);
@@ -1015,9 +971,12 @@
       }
       memcpy(map.Begin(), &image_header, sizeof(ImageHeader));
 
+      Runtime::ScopedThreadPoolUsage stpu;
+      ThreadPool* const pool = stpu.GetThreadPool();
       const uint64_t start = NanoTime();
       Thread* const self = Thread::Current();
-      const bool use_parallel = pool != nullptr;
+      static constexpr size_t kMinBlocks = 2u;
+      const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks;
       for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) {
         auto function = [&](Thread*) {
           const uint64_t start2 = NanoTime();
@@ -1062,32 +1021,30 @@
           app_oat_(app_oat) {}
 
     // Return the relocated address of a heap object.
+    // Null checks must be performed in the caller (for performance reasons).
     template <typename T>
     ALWAYS_INLINE T* ForwardObject(T* src) const {
+      DCHECK(src != nullptr);
       const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
       if (boot_image_.InSource(uint_src)) {
         return reinterpret_cast<T*>(boot_image_.ToDest(uint_src));
       }
-      if (app_image_.InSource(uint_src)) {
-        return reinterpret_cast<T*>(app_image_.ToDest(uint_src));
-      }
       // Since we are fixing up the app image, there should only be pointers to the app image and
       // boot image.
-      DCHECK(src == nullptr) << reinterpret_cast<const void*>(src);
-      return src;
+      DCHECK(app_image_.InSource(uint_src)) << reinterpret_cast<const void*>(src);
+      return reinterpret_cast<T*>(app_image_.ToDest(uint_src));
     }
 
     // Return the relocated address of a code pointer (contained by an oat file).
+    // Null checks must be performed in the caller (for performance reasons).
     ALWAYS_INLINE const void* ForwardCode(const void* src) const {
+      DCHECK(src != nullptr);
       const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
       if (boot_image_.InSource(uint_src)) {
         return reinterpret_cast<const void*>(boot_image_.ToDest(uint_src));
       }
-      if (app_oat_.InSource(uint_src)) {
-        return reinterpret_cast<const void*>(app_oat_.ToDest(uint_src));
-      }
-      DCHECK(src == nullptr) << src;
-      return src;
+      DCHECK(app_oat_.InSource(uint_src)) << src;
+      return reinterpret_cast<const void*>(app_oat_.ToDest(uint_src));
     }
 
     // Must be called on pointers that already have been relocated to the destination relocation.
@@ -1157,9 +1114,12 @@
       // Space is not yet added to the heap, don't do a read barrier.
       mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
           offset);
-      // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
-      // image.
-      obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, ForwardObject(ref));
+      if (ref != nullptr) {
+        // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
+        // image.
+        obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
+            offset, ForwardObject(ref));
+      }
     }
 
     // java.lang.ref.Reference visitor.
@@ -1167,9 +1127,11 @@
                     ObjPtr<mirror::Reference> ref) const
         REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
       mirror::Object* obj = ref->GetReferent<kWithoutReadBarrier>();
-      ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
-          mirror::Reference::ReferentOffset(),
-          ForwardObject(obj));
+      if (obj != nullptr) {
+        ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
+            mirror::Reference::ReferentOffset(),
+            ForwardObject(obj));
+      }
     }
 
     void operator()(mirror::Object* obj) const
@@ -1915,7 +1877,6 @@
                         image_location.c_str(),
                         /*oat_file=*/ nullptr,
                         logger,
-                        /*thread_pool=*/ nullptr,
                         image_reservation,
                         error_msg);
   }
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index 13bead2..a0eeae2 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -236,6 +236,7 @@
       case Intrinsics::kUnsafeFullFence:
       case Intrinsics::kCRC32Update:
       case Intrinsics::kCRC32UpdateBytes:
+      case Intrinsics::kCRC32UpdateByteBuffer:
       case Intrinsics::kStringNewStringFromBytes:
       case Intrinsics::kStringNewStringFromChars:
       case Intrinsics::kStringNewStringFromString:
diff --git a/runtime/image.cc b/runtime/image.cc
index fe1d88f..b6bb0b1 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -29,7 +29,7 @@
 namespace art {
 
 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '7', '3', '\0' };  // Image reservation.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '7', '4', '\0' };  // CRC32UpdateBB intrinsic
 
 ImageHeader::ImageHeader(uint32_t image_reservation_size,
                          uint32_t component_count,
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index a633a63..d1896e6 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -44,7 +44,7 @@
 #include "handle_scope-inl.h"
 #include "interpreter_mterp_impl.h"
 #include "interpreter_switch_impl.h"
-#include "jit/jit.h"
+#include "jit/jit-inl.h"
 #include "mirror/call_site.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache.h"
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 16e118c..2127f1d 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -560,6 +560,7 @@
     UNIMPLEMENTED_CASE(ThreadInterrupted /* ()Z */)
     UNIMPLEMENTED_CASE(CRC32Update /* (II)I */)
     UNIMPLEMENTED_CASE(CRC32UpdateBytes /* (I[BII)I */)
+    UNIMPLEMENTED_CASE(CRC32UpdateByteBuffer /* (IJII)I */)
     INTRINSIC_CASE(VarHandleFullFence)
     INTRINSIC_CASE(VarHandleAcquireFence)
     INTRINSIC_CASE(VarHandleReleaseFence)
diff --git a/runtime/interpreter/interpreter_switch_impl-inl.h b/runtime/interpreter/interpreter_switch_impl-inl.h
index 94cb3de..aec2aa2 100644
--- a/runtime/interpreter/interpreter_switch_impl-inl.h
+++ b/runtime/interpreter/interpreter_switch_impl-inl.h
@@ -26,7 +26,7 @@
 #include "dex/dex_instruction_list.h"
 #include "experimental_flags.h"
 #include "interpreter_common.h"
-#include "jit/jit.h"
+#include "jit/jit-inl.h"
 #include "jvalue-inl.h"
 #include "mirror/string-alloc-inl.h"
 #include "nth_caller_visitor.h"
diff --git a/runtime/intrinsics_list.h b/runtime/intrinsics_list.h
index db43b24..57e81a7 100644
--- a/runtime/intrinsics_list.h
+++ b/runtime/intrinsics_list.h
@@ -221,6 +221,7 @@
   V(ReachabilityFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/ref/Reference;", "reachabilityFence", "(Ljava/lang/Object;)V") \
   V(CRC32Update, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/util/zip/CRC32;", "update", "(II)I") \
   V(CRC32UpdateBytes, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/util/zip/CRC32;", "updateBytes", "(I[BII)I") \
+  V(CRC32UpdateByteBuffer, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/util/zip/CRC32;", "updateByteBuffer", "(IJII)I") \
   SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(V)
 
 #endif  // ART_RUNTIME_INTRINSICS_LIST_H_
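
For reference, the (IJII)I shorthand on the new CRC32UpdateByteBuffer entry decodes, per standard JNI descriptor rules, to a signature along these lines (the parameter meanings are an assumption based on the neighboring CRC32 entries, not stated in this patch):

    // int updateByteBuffer(int crc, long byteBufferAddr, int off, int len)
    // The long is taken to be the native address of a direct ByteBuffer's storage.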
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 853c0ca..99f9387 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -19,14 +19,17 @@
 #include <android-base/logging.h>
 
 #include "base/array_ref.h"
+#include "base/logging.h"
 #include "base/mutex.h"
 #include "base/time_utils.h"
+#include "base/utils.h"
 #include "dex/dex_file.h"
 #include "thread-current-inl.h"
 #include "thread.h"
 
 #include <atomic>
 #include <cstddef>
+#include <deque>
 #include <map>
 
 //
@@ -77,6 +80,10 @@
 //
 
 namespace art {
+
+static Mutex g_jit_debug_lock("JIT native debug entries", kNativeDebugInterfaceLock);
+static Mutex g_dex_debug_lock("DEX native debug entries", kNativeDebugInterfaceLock);
+
 extern "C" {
   enum JITAction {
     JIT_NOACTION = 0,
@@ -127,14 +134,14 @@
   void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;
 
   // The root data structure describing of all JITed methods.
-  JITDescriptor __jit_debug_descriptor GUARDED_BY(*Locks::native_debug_interface_lock_) {};
+  JITDescriptor __jit_debug_descriptor GUARDED_BY(g_jit_debug_lock) {};
 
   // The following globals mirror the ones above, but are used to register dex files.
   void __attribute__((noinline)) __dex_debug_register_code() {
     __asm__("");
   }
   void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
-  JITDescriptor __dex_debug_descriptor GUARDED_BY(*Locks::native_debug_interface_lock_) {};
+  JITDescriptor __dex_debug_descriptor GUARDED_BY(g_dex_debug_lock) {};
 }
 
 // Mark the descriptor as "locked", so native tools know the data is being modified.
@@ -157,8 +164,7 @@
     JITDescriptor& descriptor,
     void (*register_code_ptr)(),
     ArrayRef<const uint8_t> symfile,
-    bool copy_symfile)
-    REQUIRES(Locks::native_debug_interface_lock_) {
+    bool copy_symfile) {
   // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
   if (copy_symfile) {
     uint8_t* copy = new uint8_t[symfile.size()];
@@ -199,8 +205,7 @@
     JITDescriptor& descriptor,
     void (*register_code_ptr)(),
     JITCodeEntry* entry,
-    bool free_symfile)
-    REQUIRES(Locks::native_debug_interface_lock_) {
+    bool free_symfile) {
   CHECK(entry != nullptr);
   const uint8_t* symfile = entry->symfile_addr_;
 
@@ -238,11 +243,10 @@
   }
 }
 
-static std::map<const DexFile*, JITCodeEntry*> g_dex_debug_entries
-    GUARDED_BY(*Locks::native_debug_interface_lock_);
+static std::map<const DexFile*, JITCodeEntry*> g_dex_debug_entries GUARDED_BY(g_dex_debug_lock);
 
 void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
-  MutexLock mu(self, *Locks::native_debug_interface_lock_);
+  MutexLock mu(self, g_dex_debug_lock);
   DCHECK(dexfile != nullptr);
   // This is just defensive check. The class linker should not register the dex file twice.
   if (g_dex_debug_entries.count(dexfile) == 0) {
@@ -256,7 +260,7 @@
 }
 
 void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
-  MutexLock mu(self, *Locks::native_debug_interface_lock_);
+  MutexLock mu(self, g_dex_debug_lock);
   auto it = g_dex_debug_entries.find(dexfile);
   // We register dex files in the class linker and free them in DexFile_closeDexFile, but
   // there might be cases where we load the dex file without using it in the class linker.
@@ -270,46 +274,133 @@
 }
 
 // Mapping from handle to entry. Used to manage life-time of the entries.
-static std::map<const void*, JITCodeEntry*> g_jit_debug_entries
-    GUARDED_BY(*Locks::native_debug_interface_lock_);
+static std::multimap<const void*, JITCodeEntry*> g_jit_debug_entries GUARDED_BY(g_jit_debug_lock);
+
+// Number of entries added since last packing.  Used to pack entries in bulk.
+static size_t g_jit_num_unpacked_entries GUARDED_BY(g_jit_debug_lock) = 0;
+
+// We postpone removal so that it is done in bulk.
+static std::deque<const void*> g_jit_removed_entries GUARDED_BY(g_jit_debug_lock);
+
+// Split the JIT code cache into groups of fixed size and create a single JITCodeEntry per group.
+// The start address of a method's code determines which group it belongs to.  The end is irrelevant.
+// As a consequence, newly added mini debug infos will be merged and old ones (GCed) will be pruned.
+static void MaybePackJitMiniDebugInfo(PackElfFileForJITFunction pack,
+                                      InstructionSet isa,
+                                      const InstructionSetFeatures* features)
+    REQUIRES(g_jit_debug_lock) {
+  // Size of memory range covered by each JITCodeEntry.
+  // The number of methods per entry is variable (depending on how many fit in that range).
+  constexpr uint32_t kGroupSize = 64 * KB;
+  // Even if there are no removed entries, we want to pack new entries on a regular basis.
+  constexpr uint32_t kPackFrequency = 64;
+
+  std::deque<const void*>& removed_entries = g_jit_removed_entries;
+  std::sort(removed_entries.begin(), removed_entries.end());
+  if (removed_entries.empty() && g_jit_num_unpacked_entries < kPackFrequency) {
+    return;  // Nothing to do.
+  }
+
+  std::vector<const uint8_t*> added_elf_files;
+  std::vector<const void*> removed_symbols;
+  auto added_it = g_jit_debug_entries.begin();
+  auto removed_it = removed_entries.begin();
+  while (added_it != g_jit_debug_entries.end()) {
+    // Collect all entries that have been added or removed within our memory range.
+    const void* group_ptr = AlignDown(added_it->first, kGroupSize);
+    added_elf_files.clear();
+    auto added_begin = added_it;
+    while (added_it != g_jit_debug_entries.end() &&
+           AlignDown(added_it->first, kGroupSize) == group_ptr) {
+      added_elf_files.push_back((added_it++)->second->symfile_addr_);
+    }
+    removed_symbols.clear();
+    while (removed_it != removed_entries.end() &&
+           AlignDown(*removed_it, kGroupSize) == group_ptr) {
+      removed_symbols.push_back(*(removed_it++));
+    }
+
+    // Create a new single JITCodeEntry that covers this memory range.
+    if (added_elf_files.size() == 1 && removed_symbols.size() == 0) {
+      continue;  // Nothing changed in this memory range.
+    }
+    uint64_t start_time = MilliTime();
+    size_t symbols;
+    std::vector<uint8_t> packed = pack(isa, features, added_elf_files, removed_symbols, &symbols);
+    VLOG(jit)
+        << "JIT mini-debug-info packed"
+        << " for " << group_ptr
+        << " in " << MilliTime() - start_time << "ms"
+        << " files=" << added_elf_files.size()
+        << " removed=" << removed_symbols.size()
+        << " symbols=" << symbols
+        << " size=" << PrettySize(packed.size());
+
+    // Replace the old entries with the new one (with their lifetimes temporarily overlapping).
+    JITCodeEntry* packed_entry = CreateJITCodeEntryInternal(
+        __jit_debug_descriptor,
+        __jit_debug_register_code_ptr,
+        ArrayRef<const uint8_t>(packed),
+        /*copy_symfile=*/ true);
+    for (auto it = added_begin; it != added_it; ++it) {
+      DeleteJITCodeEntryInternal(__jit_debug_descriptor,
+                                 __jit_debug_register_code_ptr,
+                                 /*entry=*/ it->second,
+                                 /*free_symfile=*/ true);
+    }
+    g_jit_debug_entries.erase(added_begin, added_it);
+    g_jit_debug_entries.emplace(group_ptr, packed_entry);
+  }
+  CHECK(added_it == g_jit_debug_entries.end());
+  CHECK(removed_it == removed_entries.end());
+  removed_entries.clear();
+  g_jit_num_unpacked_entries = 0;
+}
 
 void AddNativeDebugInfoForJit(Thread* self,
                               const void* code_ptr,
-                              const std::vector<uint8_t>& symfile) {
-  MutexLock mu(self, *Locks::native_debug_interface_lock_);
+                              const std::vector<uint8_t>& symfile,
+                              PackElfFileForJITFunction pack,
+                              InstructionSet isa,
+                              const InstructionSetFeatures* features) {
+  MutexLock mu(self, g_jit_debug_lock);
   DCHECK_NE(symfile.size(), 0u);
 
+  MaybePackJitMiniDebugInfo(pack, isa, features);
+
   JITCodeEntry* entry = CreateJITCodeEntryInternal(
       __jit_debug_descriptor,
       __jit_debug_register_code_ptr,
       ArrayRef<const uint8_t>(symfile),
       /*copy_symfile=*/ true);
 
+  VLOG(jit)
+      << "JIT mini-debug-info added"
+      << " for " << code_ptr
+      << " size=" << PrettySize(symfile.size());
+
   // We don't provide code_ptr for type debug info, which means we cannot free it later.
   // (this only happens when --generate-debug-info flag is enabled for the purpose
   // of being debugged with gdb; it does not happen for debuggable apps by default).
   if (code_ptr != nullptr) {
-    bool ok = g_jit_debug_entries.emplace(code_ptr, entry).second;
-    DCHECK(ok) << "Native debug entry already exists for " << std::hex << code_ptr;
+    g_jit_debug_entries.emplace(code_ptr, entry);
+    // Count how many entries we have added since the last mini-debug-info packing.
+    // We avoid g_jit_debug_entries.size() here because it can shrink during packing.
+    g_jit_num_unpacked_entries++;
   }
 }
 
 void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr) {
-  MutexLock mu(self, *Locks::native_debug_interface_lock_);
-  auto it = g_jit_debug_entries.find(code_ptr);
+  MutexLock mu(self, g_jit_debug_lock);
   // We generate JIT native debug info only if the right runtime flags are enabled,
   // but we try to remove it unconditionally whenever code is freed from JIT cache.
-  if (it != g_jit_debug_entries.end()) {
-    DeleteJITCodeEntryInternal(__jit_debug_descriptor,
-                               __jit_debug_register_code_ptr,
-                               it->second,
-                               /*free_symfile=*/ true);
-    g_jit_debug_entries.erase(it);
+  if (!g_jit_debug_entries.empty()) {
+    g_jit_removed_entries.push_back(code_ptr);
   }
 }
 
 size_t GetJitMiniDebugInfoMemUsage() {
-  MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
+  MutexLock mu(Thread::Current(), g_jit_debug_lock);
   size_t size = 0;
   for (auto entry : g_jit_debug_entries) {
     size += sizeof(JITCodeEntry) + entry.second->symfile_size_ + /*map entry*/ 4 * sizeof(void*);
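
The packing above keys every group by the 64 KB-aligned start address of the method's code. A minimal standalone sketch of that grouping rule, with GroupOf standing in for AlignDown(code_ptr, kGroupSize):

    #include <cstdint>

    // Sketch only: the constant mirrors kGroupSize used above.
    static const void* GroupOf(const void* code_ptr) {
      constexpr uintptr_t kGroupSize = 64 * 1024;
      uintptr_t p = reinterpret_cast<uintptr_t>(code_ptr);
      return reinterpret_cast<const void*>(p & ~(kGroupSize - 1));  // AlignDown.
    }
    // Two JITed methods end up in the same packed JITCodeEntry exactly when
    // GroupOf(a) == GroupOf(b); the code's end address plays no role.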
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index 4b0d011..17beb4b 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -20,6 +20,7 @@
 #include <inttypes.h>
 #include <vector>
 
+#include "arch/instruction_set_features.h"
 #include "base/locks.h"
 
 namespace art {
@@ -27,28 +28,35 @@
 class DexFile;
 class Thread;
 
+// This function is declared in the compiler library.
+// We need to pass it by pointer to be able to call it from the runtime.
+typedef std::vector<uint8_t> PackElfFileForJITFunction(
+    InstructionSet isa,
+    const InstructionSetFeatures* features,
+    std::vector<const uint8_t*>& added_elf_files,
+    std::vector<const void*>& removed_symbols,
+    /*out*/ size_t* num_symbols);
+
 // Notify native tools (e.g. libunwind) that DEX file has been opened.
-void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile)
-    REQUIRES(!Locks::native_debug_interface_lock_);
+void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile);
 
 // Notify native tools (e.g. libunwind) that DEX file has been closed.
-void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile)
-    REQUIRES(!Locks::native_debug_interface_lock_);
+void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile);
 
 // Notify native tools (e.g. libunwind) that JIT has compiled a new method.
 // The method will make copy of the passed ELF file (to shrink it to the minimum size).
 void AddNativeDebugInfoForJit(Thread* self,
                               const void* code_ptr,
-                              const std::vector<uint8_t>& symfile)
-    REQUIRES(!Locks::native_debug_interface_lock_);
+                              const std::vector<uint8_t>& symfile,
+                              PackElfFileForJITFunction pack,
+                              InstructionSet isa,
+                              const InstructionSetFeatures* features);
 
 // Notify native tools (e.g. libunwind) that JIT code has been garbage collected.
-void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr)
-    REQUIRES(!Locks::native_debug_interface_lock_);
+void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr);
 
 // Returns approximate memory used by debug info for JIT code.
-size_t GetJitMiniDebugInfoMemUsage()
-    REQUIRES(!Locks::native_debug_interface_lock_);
+size_t GetJitMiniDebugInfoMemUsage();
 
 }  // namespace art
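
A conforming pack function would be defined in the compiler library and passed by pointer into AddNativeDebugInfoForJit. A hedged sketch of such a definition (the name and body are illustrative only, not from this patch):

    // Hypothetical function matching PackElfFileForJITFunction.
    std::vector<uint8_t> PackForJitSketch(InstructionSet isa,
                                          const InstructionSetFeatures* features,
                                          std::vector<const uint8_t*>& added_elf_files,
                                          std::vector<const void*>& removed_symbols,
                                          /*out*/ size_t* num_symbols) {
      // A real implementation merges the added ELF files, drops the removed
      // symbols, and reports how many symbols survive.
      *num_symbols = 0;
      return std::vector<uint8_t>();
    }
    // Usage: AddNativeDebugInfoForJit(self, code_ptr, symfile, PackForJitSketch, isa, features);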
 
diff --git a/runtime/jit/jit-inl.h b/runtime/jit/jit-inl.h
new file mode 100644
index 0000000..80324ad
--- /dev/null
+++ b/runtime/jit/jit-inl.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_JIT_INL_H_
+#define ART_RUNTIME_JIT_JIT_INL_H_
+
+#include "jit/jit.h"
+
+#include "art_method.h"
+#include "base/bit_utils.h"
+#include "thread.h"
+#include "runtime-inl.h"
+
+namespace art {
+namespace jit {
+
+inline bool Jit::ShouldUsePriorityThreadWeight(Thread* self) {
+  return self->IsJitSensitiveThread() && Runtime::Current()->InJankPerceptibleProcessState();
+}
+
+inline void Jit::AddSamples(Thread* self,
+                            ArtMethod* method,
+                            uint16_t samples,
+                            bool with_backedges) {
+  if (Jit::ShouldUsePriorityThreadWeight(self)) {
+    samples *= PriorityThreadWeight();
+  }
+  uint32_t old_count = method->GetCounter();
+  uint32_t new_count = old_count + samples;
+
+  // The full check is fairly expensive so we just add to hotness most of the time,
+  // and we do the full check only when some of the higher bits of the count change.
+  // NB: The method needs to see the transitions of the counter past the thresholds.
+  uint32_t old_batch = RoundDown(old_count, kJitSamplesBatchSize);  // Clear lower bits.
+  uint32_t new_batch = RoundDown(new_count, kJitSamplesBatchSize);  // Clear lower bits.
+  if (UNLIKELY(old_batch == 0)) {
+    // For low sample counts, we check every time (which is important for tests).
+    if (!MaybeCompileMethod(self, method, old_count, new_count, with_backedges)) {
+      // Tests may check that the counter is 0 for methods that we never compile.
+      return;  // Ignore the samples for now and retry later.
+    }
+  } else if (UNLIKELY(old_batch != new_batch)) {
+    // For high sample counts, we check only when we move past the batch boundary.
+    if (!MaybeCompileMethod(self, method, old_batch, new_batch, with_backedges)) {
+      // OSR compilation will ignore the samples if they don't have backedges.
+      return;  // Ignore the samples for now and retry later.
+    }
+  }
+
+  method->SetCounter(new_count);
+}
+
+}  // namespace jit
+}  // namespace art
+
+#endif  // ART_RUNTIME_JIT_JIT_INL_H_
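
AddSamples above pays for the full MaybeCompileMethod check only while the counter is still in the first batch or when it crosses a multiple of kJitSamplesBatchSize. A standalone sketch of the boundary test, with kBatch mirroring kJitSamplesBatchSize:

    #include <cstdint>

    constexpr uint32_t kBatch = 32;  // Mirrors kJitSamplesBatchSize.
    // True when the counter passed a multiple of kBatch; equivalent to comparing
    // RoundDown(old_count, kBatch) with RoundDown(new_count, kBatch).
    inline bool CrossedBatchBoundary(uint32_t old_count, uint32_t new_count) {
      return (old_count / kBatch) != (new_count / kBatch);
    }
    // CrossedBatchBoundary(31, 33) == true; CrossedBatchBoundary(33, 40) == false.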
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 03c97f4..d44bd59 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -28,6 +28,7 @@
 #include "debugger.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "interpreter/interpreter.h"
+#include "jit-inl.h"
 #include "jit_code_cache.h"
 #include "jni/java_vm_ext.h"
 #include "mirror/method_handle_impl.h"
@@ -68,6 +69,14 @@
 };
 DEFINE_RUNTIME_DEBUG_FLAG(StressModeHelper, kSlowMode);
 
+uint32_t JitOptions::RoundUpThreshold(uint32_t threshold) {
+  if (threshold > kJitSamplesBatchSize) {
+    threshold = RoundUp(threshold, kJitSamplesBatchSize);
+  }
+  CHECK_LE(threshold, std::numeric_limits<uint16_t>::max());
+  return threshold;
+}
+
 JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
   auto* jit_options = new JitOptions;
   jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);
@@ -93,30 +102,25 @@
                    : kJitStressDefaultCompileThreshold)
             : kJitDefaultCompileThreshold;
   }
-  if (jit_options->compile_threshold_ > std::numeric_limits<uint16_t>::max()) {
-    LOG(FATAL) << "Method compilation threshold is above its internal limit.";
-  }
+  jit_options->compile_threshold_ = RoundUpThreshold(jit_options->compile_threshold_);
 
   if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) {
     jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold);
-    if (jit_options->warmup_threshold_ > std::numeric_limits<uint16_t>::max()) {
-      LOG(FATAL) << "Method warmup threshold is above its internal limit.";
-    }
   } else {
     jit_options->warmup_threshold_ = jit_options->compile_threshold_ / 2;
   }
+  jit_options->warmup_threshold_ = RoundUpThreshold(jit_options->warmup_threshold_);
 
   if (options.Exists(RuntimeArgumentMap::JITOsrThreshold)) {
     jit_options->osr_threshold_ = *options.Get(RuntimeArgumentMap::JITOsrThreshold);
-    if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
-      LOG(FATAL) << "Method on stack replacement threshold is above its internal limit.";
-    }
   } else {
     jit_options->osr_threshold_ = jit_options->compile_threshold_ * 2;
     if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
-      jit_options->osr_threshold_ = std::numeric_limits<uint16_t>::max();
+      jit_options->osr_threshold_ =
+          RoundDown(std::numeric_limits<uint16_t>::max(), kJitSamplesBatchSize);
     }
   }
+  jit_options->osr_threshold_ = RoundUpThreshold(jit_options->osr_threshold_);
 
   if (options.Exists(RuntimeArgumentMap::JITPriorityThreadWeight)) {
     jit_options->priority_thread_weight_ =
@@ -149,10 +153,6 @@
   return jit_options;
 }
 
-bool Jit::ShouldUsePriorityThreadWeight(Thread* self) {
-  return self->IsJitSensitiveThread() && Runtime::Current()->InJankPerceptibleProcessState();
-}
-
 void Jit::DumpInfo(std::ostream& os) {
   code_cache_->Dump(os);
   cumulative_timings_.Dump(os);
@@ -639,20 +639,24 @@
   return false;
 }
 
-void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_backedges) {
+bool Jit::MaybeCompileMethod(Thread* self,
+                             ArtMethod* method,
+                             uint32_t old_count,
+                             uint32_t new_count,
+                             bool with_backedges) {
   if (thread_pool_ == nullptr) {
     // Should only see this when shutting down, starting up, or in safe mode.
     DCHECK(Runtime::Current()->IsShuttingDown(self) ||
            !Runtime::Current()->IsFinishedStarting() ||
            Runtime::Current()->IsSafeMode());
-    return;
+    return false;
   }
   if (IgnoreSamplesForMethod(method)) {
-    return;
+    return false;
   }
   if (HotMethodThreshold() == 0) {
     // Tests might request JIT on first use (compiled synchronously in the interpreter).
-    return;
+    return false;
   }
   DCHECK(thread_pool_ != nullptr);
   DCHECK_GT(WarmMethodThreshold(), 0);
@@ -661,15 +665,9 @@
   DCHECK_GE(PriorityThreadWeight(), 1);
   DCHECK_LE(PriorityThreadWeight(), HotMethodThreshold());
 
-  uint16_t starting_count = method->GetCounter();
-  if (Jit::ShouldUsePriorityThreadWeight(self)) {
-    count *= PriorityThreadWeight();
-  }
-  uint32_t new_count = starting_count + count;
-  // Note: Native method have no "warm" state or profiling info.
-  if (LIKELY(!method->IsNative()) && starting_count < WarmMethodThreshold()) {
-    if ((new_count >= WarmMethodThreshold()) &&
-        (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
+  if (old_count < WarmMethodThreshold() && new_count >= WarmMethodThreshold()) {
+    // Note: Native methods have no "warm" state or profiling info.
+    if (!method->IsNative() && method->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
       bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
       if (success) {
         VLOG(jit) << "Start profiling " << method->PrettyMethod();
@@ -679,7 +677,7 @@
         // Calling ProfilingInfo::Create might put us in a suspended state, which could
         // lead to the thread pool being deleted when we are shutting down.
         DCHECK(Runtime::Current()->IsShuttingDown(self));
-        return;
+        return false;
       }
 
       if (!success) {
@@ -689,32 +687,27 @@
             self, new JitCompileTask(method, JitCompileTask::TaskKind::kAllocateProfile));
       }
     }
-    // Avoid jumping more than one state at a time.
-    new_count = std::min(new_count, static_cast<uint32_t>(HotMethodThreshold() - 1));
-  } else if (UseJitCompilation()) {
-    if (starting_count < HotMethodThreshold()) {
-      if ((new_count >= HotMethodThreshold()) &&
-          !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
+  }
+  if (UseJitCompilation()) {
+    if (old_count < HotMethodThreshold() && new_count >= HotMethodThreshold()) {
+      if (!code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
         DCHECK(thread_pool_ != nullptr);
         thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
       }
-      // Avoid jumping more than one state at a time.
-      new_count = std::min(new_count, static_cast<uint32_t>(OSRMethodThreshold() - 1));
-    } else if (starting_count < OSRMethodThreshold()) {
+    }
+    if (old_count < OSRMethodThreshold() && new_count >= OSRMethodThreshold()) {
       if (!with_backedges) {
-        // If the samples don't contain any back edge, we don't increment the hotness.
-        return;
+        return false;
       }
       DCHECK(!method->IsNative());  // No back edges reported for native methods.
-      if ((new_count >= OSRMethodThreshold()) &&  !code_cache_->IsOsrCompiled(method)) {
+      if (!code_cache_->IsOsrCompiled(method)) {
         DCHECK(thread_pool_ != nullptr);
         thread_pool_->AddTask(
             self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileOsr));
       }
     }
   }
-  // Update hotness counter
-  method->SetCounter(new_count);
+  return true;
 }
 
 class ScopedSetRuntimeThread {
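
RoundUpThreshold keeps each threshold a multiple of the batch size (and, via the CHECK, within uint16_t range), so the batched counter cannot step past a threshold without MaybeCompileMethod observing the transition. A standalone sketch of the same arithmetic:

    #include <cstdint>

    constexpr uint32_t kBatch = 32;  // Mirrors kJitSamplesBatchSize.
    uint32_t RoundUpThresholdSketch(uint32_t threshold) {
      if (threshold > kBatch) {
        threshold = ((threshold + kBatch - 1) / kBatch) * kBatch;  // RoundUp.
      }
      return threshold;  // The real code also CHECKs threshold <= uint16_t max.
    }
    // RoundUpThresholdSketch(100) == 128; RoundUpThresholdSketch(20) == 20.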
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index e5c9766..714db3a 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -47,6 +47,7 @@
 // At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
 // See android/os/Process.java.
 static constexpr int kJitPoolThreadPthreadDefaultPriority = 9;
+static constexpr uint32_t kJitSamplesBatchSize = 32;  // Must be power of 2.
 
 class JitOptions {
  public:
@@ -122,12 +123,16 @@
   }
 
  private:
+  // We add samples in batches of size kJitSamplesBatchSize.
+  // This method rounds the threshold so that it is a multiple of the batch size.
+  static uint32_t RoundUpThreshold(uint32_t threshold);
+
   bool use_jit_compilation_;
   size_t code_cache_initial_capacity_;
   size_t code_cache_max_capacity_;
-  uint16_t compile_threshold_;
-  uint16_t warmup_threshold_;
-  uint16_t osr_threshold_;
+  uint32_t compile_threshold_;
+  uint32_t warmup_threshold_;
+  uint32_t osr_threshold_;
   uint16_t priority_thread_weight_;
   uint16_t invoke_transition_weight_;
   bool dump_info_on_shutdown_;
@@ -154,7 +159,7 @@
   static constexpr size_t kDefaultPriorityThreadWeightRatio = 1000;
   static constexpr size_t kDefaultInvokeTransitionWeightRatio = 500;
   // How frequently should the interpreter check to see if OSR compilation is ready.
-  static constexpr int16_t kJitRecheckOSRThreshold = 100;
+  static constexpr int16_t kJitRecheckOSRThreshold = 101;  // Prime number to avoid patterns.
 
   virtual ~Jit();
 
@@ -218,7 +223,10 @@
   void MethodEntered(Thread* thread, ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void AddSamples(Thread* self, ArtMethod* method, uint16_t samples, bool with_backedges)
+  ALWAYS_INLINE void AddSamples(Thread* self,
+                                ArtMethod* method,
+                                uint16_t samples,
+                                bool with_backedges)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void InvokeVirtualOrInterface(ObjPtr<mirror::Object> this_object,
@@ -298,6 +306,15 @@
  private:
   Jit(JitCodeCache* code_cache, JitOptions* options);
 
+  // Compile the method if the number of samples passes a threshold.
+  // Returns false if we cannot compile now; don't increment the counter and retry later.
+  bool MaybeCompileMethod(Thread* self,
+                          ArtMethod* method,
+                          uint32_t old_count,
+                          uint32_t new_count,
+                          bool with_backedges)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   static bool BindCompilerMethods(std::string* error_msg);
 
   // JIT compiler
diff --git a/runtime/jni/java_vm_ext.cc b/runtime/jni/java_vm_ext.cc
index a61a48a..7a9d292 100644
--- a/runtime/jni/java_vm_ext.cc
+++ b/runtime/jni/java_vm_ext.cc
@@ -535,8 +535,6 @@
   if (current_method != nullptr) {
     os << "\n    from " << current_method->PrettyMethod();
   }
-  os << "\n";
-  self->Dump(os);
 
   if (check_jni_abort_hook_ != nullptr) {
     check_jni_abort_hook_(check_jni_abort_hook_data_, os.str());
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 5c5523d..de4826f 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -581,9 +581,9 @@
     const char* dex_file_location_data = reinterpret_cast<const char*>(oat);
     oat += dex_file_location_size;
 
-    std::string dex_file_location = ResolveRelativeEncodedDexLocation(
-        abs_dex_location,
-        std::string(dex_file_location_data, dex_file_location_size));
+    std::string dex_file_location(dex_file_location_data, dex_file_location_size);
+    std::string dex_file_name =
+        ResolveRelativeEncodedDexLocation(abs_dex_location, dex_file_location);
 
     uint32_t dex_file_checksum;
     if (UNLIKELY(!ReadOatDexFileData(*this, &oat, &dex_file_checksum))) {
@@ -638,7 +638,7 @@
                                            error_msg,
                                            uncompressed_dex_files_.get());
         } else {
-          loaded = dex_file_loader.Open(dex_file_location.c_str(),
+          loaded = dex_file_loader.Open(dex_file_name.c_str(),
                                         dex_file_location,
                                         /*verify=*/ false,
                                         /*verify_checksum=*/ false,
@@ -819,7 +819,7 @@
         this, header->string_ids_size_, sizeof(GcRoot<mirror::String>), string_bss_mapping);
 
     std::string canonical_location =
-        DexFileLoader::GetDexCanonicalLocation(dex_file_location.c_str());
+        DexFileLoader::GetDexCanonicalLocation(dex_file_name.c_str());
 
     // Create the OatDexFile and add it to the owning container.
     OatDexFile* oat_dex_file = new OatDexFile(this,
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 8495fb5..f30ba0c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -34,6 +34,7 @@
 #include <cstdio>
 #include <cstdlib>
 #include <limits>
+#include <thread>
 #include <vector>
 
 #include "android-base/strings.h"
@@ -233,6 +234,7 @@
       class_linker_(nullptr),
       signal_catcher_(nullptr),
       java_vm_(nullptr),
+      thread_pool_ref_count_(0u),
       fault_message_(nullptr),
       threads_being_born_(0),
       shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)),
@@ -350,6 +352,8 @@
         << "\n";
   }
 
+  WaitForThreadPoolWorkersToStart();
+
   if (jit_ != nullptr) {
     // Wait for the workers to be created since there can't be any threads attaching during
     // shutdown.
@@ -402,6 +406,8 @@
     // JIT compiler threads.
     jit_->DeleteThreadPool();
   }
+  DeleteThreadPool();
+  CHECK(thread_pool_ == nullptr);
 
   // Make sure our internal threads are dead before we start tearing down things they're using.
   GetRuntimeCallbacks()->StopDebugger();
@@ -932,6 +938,18 @@
 
   // Create the thread pools.
   heap_->CreateThreadPool();
+  {
+    ScopedTrace timing("CreateThreadPool");
+    constexpr size_t kStackSize = 64 * KB;
+    constexpr size_t kMaxRuntimeWorkers = 4u;
+    const size_t num_workers =
+        std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers);
+    MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
+    CHECK(thread_pool_ == nullptr);
+    thread_pool_.reset(new ThreadPool("Runtime", num_workers, /*create_peers=*/false, kStackSize));
+    thread_pool_->StartWorkers(Thread::Current());
+  }
+
   // Reset the gc performance data at zygote fork so that the GCs
   // before fork aren't attributed to an app.
   heap_->ResetGcPerformanceInfo();
@@ -2660,4 +2678,45 @@
     GetClassLinker()->VisitClasses(&visitor);
   }
 }
+
+Runtime::ScopedThreadPoolUsage::ScopedThreadPoolUsage()
+    : thread_pool_(Runtime::Current()->AcquireThreadPool()) {}
+
+Runtime::ScopedThreadPoolUsage::~ScopedThreadPoolUsage() {
+  Runtime::Current()->ReleaseThreadPool();
+}
+
+bool Runtime::DeleteThreadPool() {
+  // Make sure workers are started to prevent thread shutdown errors.
+  WaitForThreadPoolWorkersToStart();
+  std::unique_ptr<ThreadPool> thread_pool;
+  {
+    MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
+    if (thread_pool_ref_count_ == 0) {
+      thread_pool = std::move(thread_pool_);
+    }
+  }
+  return thread_pool != nullptr;
+}
+
+ThreadPool* Runtime::AcquireThreadPool() {
+  MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
+  ++thread_pool_ref_count_;
+  return thread_pool_.get();
+}
+
+void Runtime::ReleaseThreadPool() {
+  MutexLock mu(Thread::Current(), *Locks::runtime_thread_pool_lock_);
+  CHECK_GT(thread_pool_ref_count_, 0u);
+  --thread_pool_ref_count_;
+}
+
+void Runtime::WaitForThreadPoolWorkersToStart() {
+  // Need to make sure workers are created before deleting the pool.
+  ScopedThreadPoolUsage stpu;
+  if (stpu.GetThreadPool() != nullptr) {
+    stpu.GetThreadPool()->WaitForWorkersToBeCreated();
+  }
+}
+
 }  // namespace art
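
The runtime pool's lifetime is coordinated by a reference count: ScopedThreadPoolUsage pins the pool via AcquireThreadPool/ReleaseThreadPool, and DeleteThreadPool takes ownership only when no user holds it, destroying the pool outside the lock. A minimal sketch of the same protocol, simplified to std::mutex with Pool standing in for ThreadPool:

    #include <memory>
    #include <mutex>

    struct Pool {};  // Stand-in for ThreadPool.
    std::mutex g_mu;
    std::unique_ptr<Pool> g_pool;
    size_t g_refs = 0;

    Pool* Acquire() { std::lock_guard<std::mutex> l(g_mu); ++g_refs; return g_pool.get(); }
    void Release() { std::lock_guard<std::mutex> l(g_mu); --g_refs; }

    bool Delete() {
      std::unique_ptr<Pool> doomed;
      {
        std::lock_guard<std::mutex> l(g_mu);
        if (g_refs == 0) doomed = std::move(g_pool);  // Claim only when unused.
      }
      return doomed != nullptr;  // Destructor runs outside the lock.
    }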
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 76cfcd1..a2d519d 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -792,6 +792,28 @@
     return verifier_logging_threshold_ms_;
   }
 
+  // Atomically delete the thread pool if the reference count is 0.
+  bool DeleteThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
+
+  // Wait for all the thread workers to be attached.
+  void WaitForThreadPoolWorkersToStart() REQUIRES(!Locks::runtime_thread_pool_lock_);
+
+  // Scoped usage of the runtime thread pool. Prevents the pool from being
+  // deleted. Note that the thread pool is only for startup and gets deleted afterwards.
+  class ScopedThreadPoolUsage {
+   public:
+    ScopedThreadPoolUsage();
+    ~ScopedThreadPoolUsage();
+
+    // Return the thread pool.
+    ThreadPool* GetThreadPool() const {
+      return thread_pool_;
+    }
+
+   private:
+    ThreadPool* const thread_pool_;
+  };
+
  private:
   static void InitPlatformSignalHandlers();
 
@@ -828,6 +850,9 @@
   //       friend).
   std::string GetFaultMessage();
 
+  ThreadPool* AcquireThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
+  void ReleaseThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
+
   // A pointer to the active runtime or null.
   static Runtime* instance_;
 
@@ -911,6 +936,10 @@
   std::unique_ptr<jit::JitCodeCache> jit_code_cache_;
   std::unique_ptr<jit::JitOptions> jit_options_;
 
+  // Runtime thread pool. The pool is only for startup and gets deleted afterwards.
+  std::unique_ptr<ThreadPool> thread_pool_ GUARDED_BY(Locks::runtime_thread_pool_lock_);
+  size_t thread_pool_ref_count_ GUARDED_BY(Locks::runtime_thread_pool_lock_);
+
   // Fault message, printed when we get a SIGSEGV. Stored as a native-heap object and accessed
   // lock-free, so needs to be atomic.
   std::atomic<std::string*> fault_message_;
@@ -1115,6 +1144,7 @@
 
   // Note: See comments on GetFaultMessage.
   friend std::string GetFaultMessageForAbortLogging();
+  friend class ScopedThreadPoolUsage;
 
   DISALLOW_COPY_AND_ASSIGN(Runtime);
 };
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index f2e5012..d08be72 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -147,6 +147,8 @@
   self->TransitionFromSuspendedToRunnable();
   bool started = runtime_->Start();
   ASSERT_TRUE(started);
+  // Make sure the workers are done starting so we don't get callbacks for them.
+  runtime_->WaitForThreadPoolWorkersToStart();
 
   cb_.state = CallbackState::kBase;  // Ignore main thread attach.
 
diff --git a/test/1919-vminit-thread-start-timing/src/art/Test1919.java b/test/1919-vminit-thread-start-timing/src/art/Test1919.java
index 3d5c079..f6b770f 100644
--- a/test/1919-vminit-thread-start-timing/src/art/Test1919.java
+++ b/test/1919-vminit-thread-start-timing/src/art/Test1919.java
@@ -21,10 +21,12 @@
 
   public static void run() {
     for (Event e : getEvents()) {
-      if (PRINT_ALL_THREADS ||
-          e.thr.equals(Thread.currentThread()) ||
-          e.thr.getName().equals("JVMTI_THREAD-Test1919")) {
-        System.out.println(e.name + ": " + e.thr.getName());
+      if (e.thr != null) {
+        if (PRINT_ALL_THREADS ||
+            e.thr.equals(Thread.currentThread()) ||
+            e.thr.getName().equals("JVMTI_THREAD-Test1919")) {
+          System.out.println(e.name + ": " + e.thr.getName());
+        }
       }
     }
   }
diff --git a/test/580-crc32/src/Main.java b/test/580-crc32/src/Main.java
index 6199e9b..dfc0b3c 100644
--- a/test/580-crc32/src/Main.java
+++ b/test/580-crc32/src/Main.java
@@ -16,6 +16,7 @@
 
 import java.util.zip.CRC32;
 import java.util.Random;
+import java.nio.ByteBuffer;
 
 /**
  * The ART compiler can use intrinsics for the java.util.zip.CRC32 methods:
@@ -343,8 +344,193 @@
                 CRC32ByteArray(bytes, off, len));
   }
 
+  private static long CRC32ByteBuffer(byte[] bytes, int off, int len) {
+    ByteBuffer buf = ByteBuffer.wrap(bytes, 0, off + len);
+    buf.position(off);
+    CRC32 crc32 = new CRC32();
+    crc32.update(buf);
+    return crc32.getValue();
+  }
+
+  private static void TestCRC32UpdateByteBuffer() {
+    assertEqual(0L, CRC32ByteBuffer(new byte[] {}, 0, 0));
+    assertEqual(0L, CRC32ByteBuffer(new byte[] {0}, 0, 0));
+    assertEqual(0L, CRC32ByteBuffer(new byte[] {0}, 1, 0));
+    assertEqual(0L, CRC32ByteBuffer(new byte[] {0, 0}, 1, 0));
+
+    assertEqual(CRC32Byte(0), CRC32ByteBuffer(new byte[] {0}, 0, 1));
+    assertEqual(CRC32Byte(1), CRC32ByteBuffer(new byte[] {1}, 0, 1));
+    assertEqual(CRC32Byte(0x0f), CRC32ByteBuffer(new byte[] {0x0f}, 0, 1));
+    assertEqual(CRC32Byte(0xff), CRC32ByteBuffer(new byte[] {-1}, 0, 1));
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32ByteBuffer(new byte[] {0, 0, 0}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32ByteBuffer(new byte[] {1, 1, 1}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32ByteBuffer(new byte[] {0x0f, 0x0f, 0x0f}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32ByteBuffer(new byte[] {-1, -1, -1}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2),
+                CRC32ByteBuffer(new byte[] {1, 2}, 0, 2));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32ByteBuffer(new byte[] {0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE}, 0, 4));
+
+    byte[] bytes = new byte[128 * 1024];
+    Random rnd = new Random(0);
+    rnd.nextBytes(bytes);
+
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, 8 * 1024),
+                CRC32ByteBuffer(bytes, 0, 8 * 1024));
+
+    int off = rnd.nextInt(bytes.length / 2);
+    for (int len = 0; len <= 16; ++len) {
+      assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+                  CRC32ByteBuffer(bytes, off, len));
+    }
+
+    // Check there are no issues with unaligned accesses.
+    for (int o = 1; o < 8; ++o) {
+      for (int l = 0; l <= 16; ++l) {
+        assertEqual(CRC32BytesUsingUpdateInt(bytes, o, l),
+                    CRC32ByteBuffer(bytes, o, l));
+      }
+    }
+
+    int len = bytes.length / 2;
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len - 1),
+                CRC32ByteBuffer(bytes, 0, len - 1));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len),
+                CRC32ByteBuffer(bytes, 0, len));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len + 1),
+                CRC32ByteBuffer(bytes, 0, len + 1));
+
+    len = rnd.nextInt(bytes.length + 1);
+    off = rnd.nextInt(bytes.length - len);
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+                CRC32ByteBuffer(bytes, off, len));
+  }
+
+  private static long CRC32DirectByteBuffer(byte[] bytes, int off, int len) {
+    final int total_len = off + len;
+    ByteBuffer buf = ByteBuffer.allocateDirect(total_len).put(bytes, 0, total_len);
+    buf.position(off);
+    CRC32 crc32 = new CRC32();
+    crc32.update(buf);
+    return crc32.getValue();
+  }
+
+  private static long CRC32ByteAndDirectByteBuffer(int value, byte[] bytes) {
+    ByteBuffer buf = ByteBuffer.allocateDirect(bytes.length).put(bytes);
+    buf.position(0);
+    CRC32 crc32 = new CRC32();
+    crc32.update(value);
+    crc32.update(buf);
+    return crc32.getValue();
+  }
+
+  private static long CRC32DirectByteBufferAndByte(byte[] bytes, int value) {
+    ByteBuffer buf = ByteBuffer.allocateDirect(bytes.length).put(bytes);
+    buf.position(0);
+    CRC32 crc32 = new CRC32();
+    crc32.update(buf);
+    crc32.update(value);
+    return crc32.getValue();
+  }
+
+  private static void TestCRC32UpdateDirectByteBuffer() {
+    assertEqual(0L, CRC32DirectByteBuffer(new byte[] {}, 0, 0));
+    assertEqual(0L, CRC32DirectByteBuffer(new byte[] {0}, 0, 0));
+    assertEqual(0L, CRC32DirectByteBuffer(new byte[] {0}, 1, 0));
+    assertEqual(0L, CRC32DirectByteBuffer(new byte[] {0, 0}, 1, 0));
+
+    assertEqual(CRC32Byte(0), CRC32DirectByteBuffer(new byte[] {0}, 0, 1));
+    assertEqual(CRC32Byte(1), CRC32DirectByteBuffer(new byte[] {1}, 0, 1));
+    assertEqual(CRC32Byte(0x0f), CRC32DirectByteBuffer(new byte[] {0x0f}, 0, 1));
+    assertEqual(CRC32Byte(0xff), CRC32DirectByteBuffer(new byte[] {-1}, 0, 1));
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32DirectByteBuffer(new byte[] {0, 0, 0}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32DirectByteBuffer(new byte[] {1, 1, 1}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32DirectByteBuffer(new byte[] {0x0f, 0x0f, 0x0f}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32DirectByteBuffer(new byte[] {-1, -1, -1}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2),
+                CRC32DirectByteBuffer(new byte[] {1, 2}, 0, 2));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32DirectByteBuffer(new byte[] {0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE}, 0, 4));
+
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32ByteAndDirectByteBuffer(0, new byte[] {0, 0}));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32ByteAndDirectByteBuffer(1, new byte[] {1, 1}));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32ByteAndDirectByteBuffer(0x0f, new byte[] {0x0f, 0x0f}));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32ByteAndDirectByteBuffer(-1, new byte[] {-1, -1}));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2, 3),
+                CRC32ByteAndDirectByteBuffer(1, new byte[] {2, 3}));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32ByteAndDirectByteBuffer(0, new byte[] {-1, Byte.MIN_VALUE, Byte.MAX_VALUE}));
+
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32DirectByteBufferAndByte(new byte[] {0, 0}, 0));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32DirectByteBufferAndByte(new byte[] {1, 1}, 1));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32DirectByteBufferAndByte(new byte[] {0x0f, 0x0f}, 0x0f));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32DirectByteBufferAndByte(new byte[] {-1, -1}, -1));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2, 3),
+                CRC32DirectByteBufferAndByte(new byte[] {1, 2}, 3));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32DirectByteBufferAndByte(new byte[] {0, -1, Byte.MIN_VALUE}, Byte.MAX_VALUE));
+
+    byte[] bytes = new byte[128 * 1024];
+    Random rnd = new Random(0);
+    rnd.nextBytes(bytes);
+
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, bytes.length),
+                CRC32DirectByteBuffer(bytes, 0, bytes.length));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, 8 * 1024),
+                CRC32DirectByteBuffer(bytes, 0, 8 * 1024));
+
+    int off = rnd.nextInt(bytes.length / 2);
+    for (int len = 0; len <= 16; ++len) {
+      assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+                  CRC32DirectByteBuffer(bytes, off, len));
+    }
+
+    // Check that there are no issues with unaligned accesses.
+    for (int o = 1; o < 8; ++o) {
+      for (int l = 0; l <= 16; ++l) {
+        assertEqual(CRC32BytesUsingUpdateInt(bytes, o, l),
+                    CRC32DirectByteBuffer(bytes, o, l));
+      }
+    }
+
+    int len = bytes.length / 2;
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len - 1),
+                CRC32DirectByteBuffer(bytes, 0, len - 1));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len),
+                CRC32DirectByteBuffer(bytes, 0, len));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len + 1),
+                CRC32DirectByteBuffer(bytes, 0, len + 1));
+
+    len = rnd.nextInt(bytes.length + 1);
+    // Use an inclusive bound so that nextInt's argument stays positive even
+    // when len == bytes.length.
+    off = rnd.nextInt(bytes.length - len + 1);
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+                CRC32DirectByteBuffer(bytes, off, len));
+  }
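+  // Heap and direct buffers are covered by separate tests because an
+  // implementation can treat them differently: a direct buffer exposes a
+  // native address, while a heap buffer is backed by a Java array. A small
+  // sketch of the distinction, using standard java.nio queries:
+  //
+  //   ByteBuffer direct = ByteBuffer.allocateDirect(16); // isDirect() == true
+  //   ByteBuffer heap = ByteBuffer.wrap(new byte[16]);   // hasArray() == true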
+
   public static void main(String args[]) {
     TestCRC32Update();
     TestCRC32UpdateBytes();
+    TestCRC32UpdateByteBuffer();
+    TestCRC32UpdateDirectByteBuffer();
   }
 }
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 0674f52..25b8b4b 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -1032,7 +1032,8 @@
       # Note: We first send SIGRTMIN+2 (usually 36) to ART, which will induce a full thread dump
       #       before abort. However, dumping threads might deadlock, so we also use the "-k"
       #       option to definitely kill the child.
-      cmdline="timeout -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s ${TIMEOUT_DUMPER} $cmdline"
+      # Note: Using "--foreground" to not propagate the signal to children, i.e., the runtime.
+      cmdline="timeout --foreground -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s ${TIMEOUT_DUMPER} $cmdline"
     fi
 
     if [ "$DEV_MODE" = "y" ]; then
diff --git a/tools/bootjars.sh b/tools/bootjars.sh
index 9f22827..320d4b5 100755
--- a/tools/bootjars.sh
+++ b/tools/bootjars.sh
@@ -75,7 +75,7 @@
   # Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
   # because that's what we use for compiling the core.art image.
   # It may contain additional modules from TEST_CORE_JARS.
-  core_jars_list="core-oj core-libart core-simple"
+  core_jars_list="core-oj core-libart core-simple okhttp bouncycastle apache-xml conscrypt"
   core_jars_suffix=
   if [[ $mode == target ]]; then
     core_jars_suffix=-testdex
diff --git a/tools/build_linux_bionic.sh b/tools/build_linux_bionic.sh
index 94ccc41..d3c1912 100755
--- a/tools/build_linux_bionic.sh
+++ b/tools/build_linux_bionic.sh
@@ -42,6 +42,7 @@
 
 out_dir=$(get_build_var OUT_DIR)
 host_out=$(get_build_var HOST_OUT)
+mk_product_out=$(get_build_var PRODUCT_OUT)
 
 # TODO(b/31559095) Figure out a better way to do this.
 #
@@ -52,6 +53,12 @@
 cat $out_dir/soong/soong.variables > ${tmp_soong_var}
 build/soong/soong_ui.bash --make-mode clean
 mkdir -p $out_dir/soong
+mkdir -p $mk_product_out
+
+# TODO(b/31559095) Soong will panic if this file isn't present. It contains
+# information from Make that Soong needs in order to handle the invocation of
+# dex2oat. That support would be useful to have, but it isn't needed for now.
+echo "{}" > $mk_product_out/dexpreopt.config
 
 python3 <<END - ${tmp_soong_var} ${out_dir}/soong/soong.variables
 import json
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 4dc5ced..3c65b01 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -200,8 +200,9 @@
   names: ["libcore.libcore.io.FdsanTest#testSocket"]
 },
 {
-  description: "Unexpected failures",
+  description: "Host implementation of android_getaddrinfo differs from device implementation",
   result: EXEC_FAILED,
+  modes: [host],
   bug: 121230364,
   names: [
     "libcore.libcore.net.InetAddressUtilsTest#parseNumericAddress[8]",
diff --git a/tools/veridex/veridex.cc b/tools/veridex/veridex.cc
index 96100b2..46ab8aa 100644
--- a/tools/veridex/veridex.cc
+++ b/tools/veridex/veridex.cc
@@ -126,6 +126,7 @@
   static int Run(int argc, char** argv) {
     VeridexOptions options;
     ParseArgs(&options, argc, argv);
+    android::base::InitLogging(argv);
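+    // (InitLogging sets up libbase logging, taking the program name from
+    // argv[0] and the default severity from the ANDROID_LOG_TAGS environment
+    // variable, so that the LOG() calls below are routed with the right tag.)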
 
     if (!options.dex_file) {
       LOG(ERROR) << "Required argument '" << kDexFileOption << "' not provided.";