Merge "Do not mention x86 floating point numbers in CFI."
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index e0f0ae5..7d76795 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -191,6 +191,7 @@
   compiler/dex/mir_graph_test.cc \
   compiler/dex/mir_optimization_test.cc \
   compiler/dex/quick/quick_cfi_test.cc \
+  compiler/dex/type_inference_test.cc \
   compiler/dwarf/dwarf_test.cc \
   compiler/driver/compiler_driver_test.cc \
   compiler/elf_writer_test.cc \
@@ -227,6 +228,7 @@
   compiler/utils/arena_allocator_test.cc \
   compiler/utils/dedupe_set_test.cc \
   compiler/utils/swap_space_test.cc \
+  compiler/utils/test_dex_file_builder_test.cc \
   compiler/utils/arm/managed_register_arm_test.cc \
   compiler/utils/arm64/managed_register_arm64_test.cc \
   compiler/utils/x86/managed_register_x86_test.cc \
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 9f873b3..1386439 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -23,7 +23,7 @@
 #include "gtest/gtest.h"
 
 #define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
-                                        reinterpret_cast<void*>(NULL));
+                                        reinterpret_cast<void*>(nullptr));
 
 namespace art {
   bool UsuallyEquals(double expected, double actual);
diff --git a/compiler/Android.mk b/compiler/Android.mk
index ac95abd..0ad77b4 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -23,6 +23,7 @@
 	dex/global_value_numbering.cc \
 	dex/gvn_dead_code_elimination.cc \
 	dex/local_value_numbering.cc \
+	dex/type_inference.cc \
 	dex/quick/arm/assemble_arm.cc \
 	dex/quick/arm/call_arm.cc \
 	dex/quick/arm/fp_arm.cc \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 05cb8b4..5a9e04f 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -263,7 +263,7 @@
   mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
   CHECK(klass != nullptr) << "Class not found " << class_name;
   mirror::ArtMethod* method = klass->FindVirtualMethod(method_name, signature);
-  CHECK(method != NULL) << "Virtual method not found: "
+  CHECK(method != nullptr) << "Virtual method not found: "
       << class_name << "." << method_name << signature;
   CompileMethod(method);
 }
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 4f7a970..d1acada 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -108,7 +108,7 @@
     }
     default:
       LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
-      return NULL;
+      return nullptr;
   }
 }
 
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index 6e25db6..83dfc28 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -23,7 +23,7 @@
 
 // Single forward pass over the nodes.
 inline BasicBlock* DataflowIterator::ForwardSingleNext() {
-  BasicBlock* res = NULL;
+  BasicBlock* res = nullptr;
 
   // Are we not yet at the end?
   if (idx_ < end_idx_) {
@@ -38,7 +38,7 @@
 
 // Repeat full forward passes over all nodes until no change occurs during a complete pass.
 inline BasicBlock* DataflowIterator::ForwardRepeatNext() {
-  BasicBlock* res = NULL;
+  BasicBlock* res = nullptr;
 
   // Are we at the end and have we changed something?
   if ((idx_ >= end_idx_) && changed_ == true) {
@@ -61,7 +61,7 @@
 
 // Single reverse pass over the nodes.
 inline BasicBlock* DataflowIterator::ReverseSingleNext() {
-  BasicBlock* res = NULL;
+  BasicBlock* res = nullptr;
 
   // Are we not yet at the end?
   if (idx_ >= 0) {
@@ -76,7 +76,7 @@
 
 // Repeat full backwards passes over all nodes until no change occurs during a complete pass.
 inline BasicBlock* DataflowIterator::ReverseRepeatNext() {
-  BasicBlock* res = NULL;
+  BasicBlock* res = nullptr;
 
   // Are we done and we changed something during the last iteration?
   if ((idx_ < 0) && changed_) {
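
For context: the repeating iterators above are driven until a full pass over the graph changes nothing. A minimal sketch of the calling pattern, assuming the repeating iterators' Next(bool had_change) overload that feeds changed_:

    RepeatingPreOrderDfsIterator iter(mir_graph);
    bool change = false;
    for (BasicBlock* bb = iter.Next(change); bb != nullptr; bb = iter.Next(change)) {
      change = mir_graph->InferTypes(bb);  // Worker reports whether this block changed anything.
    }
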
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 2a06cec..097c2a4 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -72,7 +72,7 @@
           : mir_graph_(mir_graph),
             start_idx_(start_idx),
             end_idx_(end_idx),
-            block_id_list_(NULL),
+            block_id_list_(nullptr),
             idx_(0),
             repeats_(0),
             changed_(false) {}
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ef94d8b..d1ddfda 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -301,7 +301,7 @@
                               art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
   UNUSED(invoke_type);
   if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) {
-    art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(),
+    art::DexCompilationUnit unit(nullptr, class_loader, art::Runtime::Current()->GetClassLinker(),
                                  dex_file, code_item, class_def_idx, method_idx, access_flags,
                                  driver.GetVerifiedMethod(&dex_file, method_idx));
     art::optimizer::DexCompiler dex_compiler(driver, unit, dex_to_dex_compilation_level);
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index b4559ef..c538d0b 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -15,7 +15,6 @@
  */
 
 #include "base/logging.h"
-#include "dataflow_iterator.h"
 #include "dataflow_iterator-inl.h"
 #include "dex/mir_field_info.h"
 #include "global_value_numbering.h"
@@ -260,10 +259,8 @@
       mir->ssa_rep = &ssa_reps_[i];
       mir->ssa_rep->num_uses = def->num_uses;
       mir->ssa_rep->uses = const_cast<int32_t*>(def->uses);  // Not modified by LVN.
-      mir->ssa_rep->fp_use = nullptr;  // Not used by LVN.
       mir->ssa_rep->num_defs = def->num_defs;
       mir->ssa_rep->defs = const_cast<int32_t*>(def->defs);  // Not modified by LVN.
-      mir->ssa_rep->fp_def = nullptr;  // Not used by LVN.
       mir->dalvikInsn.opcode = def->opcode;
       mir->offset = i;  // LVN uses offset only for debug output
       mir->optimization_flags = 0u;
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index ec12221..d7f36f7 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -478,7 +478,7 @@
       mir->dalvikInsn.opcode - Instruction::ADD_INT_2ADDR +  Instruction::ADD_INT);
 }
 
-MIR* GvnDeadCodeElimination::CreatePhi(int s_reg, bool fp) {
+MIR* GvnDeadCodeElimination::CreatePhi(int s_reg) {
   int v_reg = mir_graph_->SRegToVReg(s_reg);
   MIR* phi = mir_graph_->NewMIR();
   phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
@@ -491,11 +491,9 @@
 
   mir_graph_->AllocateSSADefData(phi, 1);
   phi->ssa_rep->defs[0] = s_reg;
-  phi->ssa_rep->fp_def[0] = fp;
 
   size_t num_uses = bb_->predecessors.size();
   mir_graph_->AllocateSSAUseData(phi, num_uses);
-  std::fill_n(phi->ssa_rep->fp_use, num_uses, fp);
   size_t idx = 0u;
   for (BasicBlockId pred_id : bb_->predecessors) {
     BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
@@ -523,14 +521,12 @@
  // defining MIR for that dalvik reg, the preserved values must come from its predecessors
   // and we need to create a new Phi (a degenerate Phi if there's only a single predecessor).
   if (def_change == kNPos) {
-    bool fp = mir_to_kill->ssa_rep->fp_def[0];
     if (wide) {
       DCHECK_EQ(new_s_reg + 1, mir_to_kill->ssa_rep->defs[1]);
-      DCHECK_EQ(fp, mir_to_kill->ssa_rep->fp_def[1]);
       DCHECK_EQ(mir_graph_->SRegToVReg(new_s_reg) + 1, mir_graph_->SRegToVReg(new_s_reg + 1));
-      CreatePhi(new_s_reg + 1, fp);  // High word Phi.
+      CreatePhi(new_s_reg + 1);  // High word Phi.
     }
-    return CreatePhi(new_s_reg, fp);
+    return CreatePhi(new_s_reg);
   } else {
     DCHECK_LT(def_change, last_change);
     DCHECK_LE(last_change, vreg_chains_.NumMIRs());
diff --git a/compiler/dex/gvn_dead_code_elimination.h b/compiler/dex/gvn_dead_code_elimination.h
index 9a19f29..f2378f2 100644
--- a/compiler/dex/gvn_dead_code_elimination.h
+++ b/compiler/dex/gvn_dead_code_elimination.h
@@ -128,7 +128,7 @@
   void KillMIR(MIRData* data);
   static void KillMIR(MIR* mir);
   static void ChangeBinOp2AddrToPlainBinOp(MIR* mir);
-  MIR* CreatePhi(int s_reg, bool fp);
+  MIR* CreatePhi(int s_reg);
   MIR* RenameSRegDefOrCreatePhi(uint16_t def_change, uint16_t last_change, MIR* mir_to_kill);
 
   // Update state variables going backwards through a MIR.
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 566527a..0393410 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -158,10 +158,8 @@
       mir->ssa_rep = &ssa_reps_[i];
       mir->ssa_rep->num_uses = def->num_uses;
       mir->ssa_rep->uses = const_cast<int32_t*>(def->uses);  // Not modified by LVN.
-      mir->ssa_rep->fp_use = nullptr;  // Not used by LVN.
       mir->ssa_rep->num_defs = def->num_defs;
       mir->ssa_rep->defs = const_cast<int32_t*>(def->defs);  // Not modified by LVN.
-      mir->ssa_rep->fp_def = nullptr;  // Not used by LVN.
       mir->dalvikInsn.opcode = def->opcode;
       mir->offset = i;  // LVN uses offset only for debug output
       mir->optimization_flags = 0u;
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 3d7a640..9099e8a 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -968,7 +968,7 @@
    * edges until we reach an explicit branch or return.
    */
   BasicBlock* ending_bb = bb;
-  if (ending_bb->last_mir_insn != NULL) {
+  if (ending_bb->last_mir_insn != nullptr) {
     uint32_t ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
     while ((ending_flags & kAnBranch) == 0) {
       ending_bb = GetBasicBlock(ending_bb->fall_through);
@@ -998,7 +998,7 @@
   bool done = false;
   while (!done) {
     tbb->visited = true;
-    for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
+    for (MIR* mir = tbb->first_mir_insn; mir != nullptr; mir = mir->next) {
       if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
         // Skip any MIR pseudo-op.
         continue;
@@ -1195,7 +1195,7 @@
 
   ClearAllVisitedFlags();
   AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
     AnalyzeBlock(bb, &stats);
   }
 
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 2a920a4..b4aec98 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -123,7 +123,7 @@
   DF_UA | DF_NULL_CHK_A | DF_REF_A,
 
   // 1F CHK_CAST vAA, type@BBBB
-  DF_UA | DF_REF_A | DF_UMS,
+  DF_UA | DF_REF_A | DF_CHK_CAST | DF_UMS,
 
   // 20 INSTANCE_OF vA, vB, type@CCCC
   DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
@@ -159,10 +159,10 @@
   DF_NOP,
 
   // 2B PACKED_SWITCH vAA, +BBBBBBBB
-  DF_UA,
+  DF_UA | DF_CORE_A,
 
   // 2C SPARSE_SWITCH vAA, +BBBBBBBB
-  DF_UA,
+  DF_UA | DF_CORE_A,
 
   // 2D CMPL_FLOAT vAA, vBB, vCC
   DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
@@ -180,22 +180,22 @@
   DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
 
   // 32 IF_EQ vA, vB, +CCCC
-  DF_UA | DF_UB,
+  DF_UA | DF_UB | DF_SAME_TYPE_AB,
 
   // 33 IF_NE vA, vB, +CCCC
-  DF_UA | DF_UB,
+  DF_UA | DF_UB | DF_SAME_TYPE_AB,
 
   // 34 IF_LT vA, vB, +CCCC
-  DF_UA | DF_UB,
+  DF_UA | DF_UB | DF_SAME_TYPE_AB,
 
   // 35 IF_GE vA, vB, +CCCC
-  DF_UA | DF_UB,
+  DF_UA | DF_UB | DF_SAME_TYPE_AB,
 
   // 36 IF_GT vA, vB, +CCCC
-  DF_UA | DF_UB,
+  DF_UA | DF_UB | DF_SAME_TYPE_AB,
 
   // 37 IF_LE vA, vB, +CCCC
-  DF_UA | DF_UB,
+  DF_UA | DF_UB | DF_SAME_TYPE_AB,
 
   // 38 IF_EQZ vAA, +BBBB
   DF_UA,
@@ -989,7 +989,7 @@
   MIR* mir;
   ArenaBitVector *use_v, *def_v, *live_in_v;
 
-  if (bb->data_flow_info == NULL) return false;
+  if (bb->data_flow_info == nullptr) return false;
 
   use_v = bb->data_flow_info->use_v =
       new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapUse);
@@ -998,7 +998,7 @@
   live_in_v = bb->data_flow_info->live_in_v =
       new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapLiveIn);
 
-  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     uint64_t df_attributes = GetDataFlowAttributes(mir);
     MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
 
@@ -1080,8 +1080,6 @@
 
   if (mir->ssa_rep->num_uses_allocated < num_uses) {
     mir->ssa_rep->uses = arena_->AllocArray<int32_t>(num_uses, kArenaAllocDFInfo);
-    // NOTE: will be filled in during type & size inference pass
-    mir->ssa_rep->fp_use = arena_->AllocArray<bool>(num_uses, kArenaAllocDFInfo);
   }
 }
 
@@ -1090,7 +1088,6 @@
 
   if (mir->ssa_rep->num_defs_allocated < num_defs) {
     mir->ssa_rep->defs = arena_->AllocArray<int32_t>(num_defs, kArenaAllocDFInfo);
-    mir->ssa_rep->fp_def = arena_->AllocArray<bool>(num_defs, kArenaAllocDFInfo);
   }
 }
 
@@ -1191,7 +1188,7 @@
 
 /* Entry function to convert a block into SSA representation */
 bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
-  if (bb->data_flow_info == NULL) return false;
+  if (bb->data_flow_info == nullptr) return false;
 
   /*
    * Pruned SSA form: Insert phi nodes for each dalvik register marked in phi_node_blocks
@@ -1214,7 +1211,7 @@
     }
   }
 
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     mir->ssa_rep =
         static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
                                                               kArenaAllocDFInfo));
@@ -1287,35 +1284,27 @@
     if (df_attributes & DF_HAS_USES) {
       num_uses = 0;
       if (df_attributes & DF_UA) {
-        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
         HandleSSAUse(mir->ssa_rep->uses, d_insn->vA, num_uses++);
         if (df_attributes & DF_A_WIDE) {
-          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
           HandleSSAUse(mir->ssa_rep->uses, d_insn->vA+1, num_uses++);
         }
       }
       if (df_attributes & DF_UB) {
-        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
         HandleSSAUse(mir->ssa_rep->uses, d_insn->vB, num_uses++);
         if (df_attributes & DF_B_WIDE) {
-          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
           HandleSSAUse(mir->ssa_rep->uses, d_insn->vB+1, num_uses++);
         }
       }
       if (df_attributes & DF_UC) {
-        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
         HandleSSAUse(mir->ssa_rep->uses, d_insn->vC, num_uses++);
         if (df_attributes & DF_C_WIDE) {
-          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
           HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+1, num_uses++);
         }
       }
     }
     if (df_attributes & DF_HAS_DEFS) {
-      mir->ssa_rep->fp_def[0] = df_attributes & DF_FP_A;
       HandleSSADef(mir->ssa_rep->defs, d_insn->vA, 0);
       if (df_attributes & DF_A_WIDE) {
-        mir->ssa_rep->fp_def[1] = df_attributes & DF_FP_A;
         HandleSSADef(mir->ssa_rep->defs, d_insn->vA+1, 1);
       }
     }
@@ -1413,8 +1402,8 @@
     return;
   }
   uint32_t weight = GetUseCountWeight(bb);
-  for (MIR* mir = bb->first_mir_insn; (mir != NULL); mir = mir->next) {
-    if (mir->ssa_rep == NULL) {
+  for (MIR* mir = bb->first_mir_insn; (mir != nullptr); mir = mir->next) {
+    if (mir->ssa_rep == nullptr) {
       continue;
     }
     for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
@@ -1459,7 +1448,7 @@
 void MIRGraph::VerifyDataflow() {
     /* Verify if all blocks are connected as claimed */
   AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
     VerifyPredInfo(bb);
   }
 }
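
The three new attribute groups above feed the rewritten type inference pass: DF_CHK_CAST marks CHECK_CAST, DF_CORE_A pins the switch key to a core type, and DF_SAME_TYPE_AB records that the two-register IF_cc opcodes compare same-typed values. A hedged sketch of a consumer, using accessors visible elsewhere in this patch:

    uint64_t df_attributes = mir_graph->GetDataFlowAttributes(mir);
    if ((df_attributes & DF_SAME_TYPE_AB) != 0u) {
      // IF_EQ..IF_LE: vA and vB must unify to one type (core, ref or fp).
    }
    if (mir_graph->HasCheckCast()) {
      // merged_df_flags_ saw at least one DF_CHK_CAST insn in the method.
    }
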
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index ca56958..e4570fd 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -124,7 +124,7 @@
   uint16_t declaring_field_idx_;
   // The type index of the class declaring the field, 0 if unresolved.
   uint16_t declaring_class_idx_;
-  // The dex file that defines the class containing the field and the field, nullptr if unresolved.
+  // The dex file that defines the class containing the field and the field, null if unresolved.
   const DexFile* declaring_dex_file_;
 };
 
@@ -179,6 +179,7 @@
   friend class GlobalValueNumberingTest;
   friend class GvnDeadCodeEliminationTest;
   friend class LocalValueNumberingTest;
+  friend class TypeInferenceTest;
 };
 
 class MirSFieldLoweringInfo : public MirFieldInfo {
@@ -254,6 +255,7 @@
   friend class GlobalValueNumberingTest;
   friend class GvnDeadCodeEliminationTest;
   friend class LocalValueNumberingTest;
+  friend class TypeInferenceTest;
 };
 
 }  // namespace art
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 4d34038..b5c42f1 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -81,15 +81,15 @@
 };
 
 MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
-    : reg_location_(NULL),
+    : reg_location_(nullptr),
       block_id_map_(std::less<unsigned int>(), arena->Adapter()),
       cu_(cu),
       ssa_base_vregs_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
       ssa_subscripts_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
-      vreg_to_ssa_map_(NULL),
-      ssa_last_defs_(NULL),
-      is_constant_v_(NULL),
-      constant_values_(NULL),
+      vreg_to_ssa_map_(nullptr),
+      ssa_last_defs_(nullptr),
+      is_constant_v_(nullptr),
+      constant_values_(nullptr),
       use_counts_(arena->Adapter()),
       raw_use_counts_(arena->Adapter()),
       num_reachable_blocks_(0),
@@ -106,24 +106,24 @@
       topological_order_indexes_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
       topological_order_loop_head_stack_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
       max_nested_loops_(0u),
-      i_dom_list_(NULL),
+      i_dom_list_(nullptr),
       temp_scoped_alloc_(),
       block_list_(arena->Adapter(kArenaAllocBBList)),
-      try_block_addr_(NULL),
-      entry_block_(NULL),
-      exit_block_(NULL),
-      current_code_item_(NULL),
+      try_block_addr_(nullptr),
+      entry_block_(nullptr),
+      exit_block_(nullptr),
+      current_code_item_(nullptr),
       m_units_(arena->Adapter()),
       method_stack_(arena->Adapter()),
       current_method_(kInvalidEntry),
       current_offset_(kInvalidEntry),
       def_count_(0),
-      opcode_count_(NULL),
+      opcode_count_(nullptr),
       num_ssa_regs_(0),
       extended_basic_blocks_(arena->Adapter()),
       method_sreg_(0),
       attributes_(METHOD_IS_LEAF),  // Start with leaf assumption, change on encountering invoke.
-      checkstats_(NULL),
+      checkstats_(nullptr),
       arena_(arena),
       backward_branches_(0),
       forward_branches_(0),
@@ -185,13 +185,13 @@
                                  BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
   DCHECK_GT(code_offset, orig_block->start_offset);
   MIR* insn = orig_block->first_mir_insn;
-  MIR* prev = NULL;  // Will be set to instruction before split.
+  MIR* prev = nullptr;  // Will be set to instruction before split.
   while (insn) {
     if (insn->offset == code_offset) break;
     prev = insn;
     insn = insn->next;
   }
-  if (insn == NULL) {
+  if (insn == nullptr) {
     LOG(FATAL) << "Break split failed";
   }
   // Now insn is at the instruction where we want to split, namely
@@ -530,7 +530,7 @@
     size = switch_data[1];
     first_key = switch_data[2] | (switch_data[3] << 16);
     target_table = reinterpret_cast<const int*>(&switch_data[4]);
-    keyTable = NULL;        // Make the compiler happy.
+    keyTable = nullptr;        // Make the compiler happy.
   /*
    * Sparse switch data format:
    *  ushort ident = 0x0200   magic value
@@ -695,9 +695,10 @@
   current_method_ = m_units_.size();
   current_offset_ = 0;
   // TODO: will need to snapshot stack image and use that as the mir context identification.
-  m_units_.push_back(new DexCompilationUnit(cu_, class_loader, Runtime::Current()->GetClassLinker(),
-                     dex_file, current_code_item_, class_def_idx, method_idx, access_flags,
-                     cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx)));
+  m_units_.push_back(new (arena_) DexCompilationUnit(
+      cu_, class_loader, Runtime::Current()->GetClassLinker(), dex_file,
+      current_code_item_, class_def_idx, method_idx, access_flags,
+      cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx)));
   const uint16_t* code_ptr = current_code_item_->insns_;
   const uint16_t* code_end =
       current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;
@@ -717,8 +718,8 @@
 
   // If this is the first method, set up default entry and exit blocks.
   if (current_method_ == 0) {
-    DCHECK(entry_block_ == NULL);
-    DCHECK(exit_block_ == NULL);
+    DCHECK(entry_block_ == nullptr);
+    DCHECK(exit_block_ == nullptr);
     DCHECK_EQ(GetNumBlocks(), 0U);
     // Use id 0 to represent a null block.
     BasicBlock* null_block = CreateNewBB(kNullBlock);
@@ -754,7 +755,7 @@
     insn->m_unit_index = current_method_;
     int width = ParseInsn(code_ptr, &insn->dalvikInsn);
     Instruction::Code opcode = insn->dalvikInsn.opcode;
-    if (opcode_count_ != NULL) {
+    if (opcode_count_ != nullptr) {
       opcode_count_[static_cast<int>(opcode)]++;
     }
 
@@ -878,7 +879,7 @@
 }
 
 void MIRGraph::ShowOpcodeStats() {
-  DCHECK(opcode_count_ != NULL);
+  DCHECK(opcode_count_ != nullptr);
   LOG(INFO) << "Opcode Count";
   for (int i = 0; i < kNumPackedOpcodes; i++) {
     if (opcode_count_[i] != 0) {
@@ -946,7 +947,7 @@
     return;
   }
   file = fopen(fpath.c_str(), "w");
-  if (file == NULL) {
+  if (file == nullptr) {
     PLOG(ERROR) << "Could not open " << fpath << " for DumpCFG.";
     return;
   }
@@ -960,7 +961,7 @@
   for (idx = 0; idx < num_blocks; idx++) {
     int block_idx = all_blocks ? idx : dfs_order_[idx];
     BasicBlock* bb = GetBasicBlock(block_idx);
-    if (bb == NULL) continue;
+    if (bb == nullptr) continue;
     if (bb->block_type == kDead) continue;
     if (bb->hidden) continue;
     if (bb->block_type == kEntryBlock) {
@@ -1500,8 +1501,8 @@
     }
     nop = true;
   }
-  int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
-  int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
+  int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0;
+  int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0;
 
   if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
     // Note that this does not check the MIR's opcode in all cases. In cases where it
@@ -1529,7 +1530,7 @@
     for (int i = 0; i < uses; i++) {
       str.append(" ");
       str.append(GetSSANameWithConst(ssa_rep->uses[i], show_singles));
-      if (!show_singles && (reg_location_ != NULL) && reg_location_[i].wide) {
+      if (!show_singles && (reg_location_ != nullptr) && reg_location_[i].wide) {
         // For the listing, skip the high sreg.
         i++;
       }
@@ -1622,7 +1623,7 @@
 
 // Similar to GetSSAName, but if ssa name represents an immediate show that as well.
 std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
-  if (reg_location_ == NULL) {
+  if (reg_location_ == nullptr) {
     // Pre-SSA - just use the standard name.
     return GetSSAName(ssa_reg);
   }
@@ -1715,7 +1716,7 @@
   CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
                                                         kArenaAllocMisc));
   MIR* move_result_mir = FindMoveResult(bb, mir);
-  if (move_result_mir == NULL) {
+  if (move_result_mir == nullptr) {
     info->result.location = kLocInvalid;
   } else {
     info->result = GetRawDest(move_result_mir);
@@ -2293,7 +2294,7 @@
 
 void BasicBlock::ResetOptimizationFlags(uint16_t reset_flags) {
   // Reset flags for all MIRs in bb.
-  for (MIR* mir = first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
     mir->optimization_flags &= (~reset_flags);
   }
 }
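
As a worked example of the packed-switch parsing in the hunk above (illustrative payload values; layout per the dex packed-switch-payload format):

    // switch_data = { 0x0100 /* ident */, 3 /* size */,
    //                 0x000a, 0x0000 /* first_key = 10 */,
    //                 t0_lo, t0_hi, t1_lo, t1_hi, t2_lo, t2_hi /* targets */ };
    // size      = switch_data[1];                           // -> 3 cases
    // first_key = switch_data[2] | (switch_data[3] << 16);  // -> 10
    // target_table = reinterpret_cast<const int*>(&switch_data[4]);
    // keyTable stays nullptr: packed switches have no key table; case i uses key first_key + i.
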
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 85b1344..0db54bf 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -39,6 +39,7 @@
 class GlobalValueNumbering;
 class GvnDeadCodeElimination;
 class PassManager;
+class TypeInference;
 
 // Forward declaration.
 class MIRGraph;
@@ -64,6 +65,7 @@
   kNullTransferSrc0,     // Object copy src[0] -> dst.
   kNullTransferSrcN,     // Phi null check state transfer.
   kRangeCheckC,          // Range check of C.
+  kCheckCastA,           // Check cast of A.
   kFPA,
   kFPB,
   kFPC,
@@ -73,6 +75,7 @@
   kRefA,
   kRefB,
   kRefC,
+  kSameTypeAB,           // A and B have the same type but it can be core/ref/fp (IF_cc).
   kUsesMethodStar,       // Implicit use of Method*.
   kUsesIField,           // Accesses an instance field (IGET/IPUT).
   kUsesSField,           // Accesses a static field (SGET/SPUT).
@@ -101,6 +104,7 @@
 #define DF_NULL_TRANSFER_0      (UINT64_C(1) << kNullTransferSrc0)
 #define DF_NULL_TRANSFER_N      (UINT64_C(1) << kNullTransferSrcN)
 #define DF_RANGE_CHK_C          (UINT64_C(1) << kRangeCheckC)
+#define DF_CHK_CAST             (UINT64_C(1) << kCheckCastA)
 #define DF_FP_A                 (UINT64_C(1) << kFPA)
 #define DF_FP_B                 (UINT64_C(1) << kFPB)
 #define DF_FP_C                 (UINT64_C(1) << kFPC)
@@ -110,6 +114,7 @@
 #define DF_REF_A                (UINT64_C(1) << kRefA)
 #define DF_REF_B                (UINT64_C(1) << kRefB)
 #define DF_REF_C                (UINT64_C(1) << kRefC)
+#define DF_SAME_TYPE_AB         (UINT64_C(1) << kSameTypeAB)
 #define DF_UMS                  (UINT64_C(1) << kUsesMethodStar)
 #define DF_IFIELD               (UINT64_C(1) << kUsesIField)
 #define DF_SFIELD               (UINT64_C(1) << kUsesSField)
@@ -217,13 +222,11 @@
  */
 struct SSARepresentation {
   int32_t* uses;
-  bool* fp_use;
   int32_t* defs;
-  bool* fp_def;
-  int16_t num_uses_allocated;
-  int16_t num_defs_allocated;
-  int16_t num_uses;
-  int16_t num_defs;
+  uint16_t num_uses_allocated;
+  uint16_t num_defs_allocated;
+  uint16_t num_uses;
+  uint16_t num_defs;
 
   static uint32_t GetStartUseIndex(Instruction::Code opcode);
 };
@@ -334,7 +337,8 @@
     // SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to limit on
     // the number of code points (64K) and size of SGET/SPUT insn (2), this will never exceed 32K.
     uint32_t sfield_lowering_info;
-    // INVOKE data index, points to MIRGraph::method_lowering_infos_.
+    // INVOKE data index, points to MIRGraph::method_lowering_infos_. Also used for inlined
+    // CONST and MOVE insn (with MIR_CALLEE) to remember the invoke for type inference.
     uint32_t method_lowering_info;
   } meta;
 
@@ -598,7 +602,7 @@
 
   BasicBlock* GetBasicBlock(unsigned int block_id) const {
     DCHECK_LT(block_id, block_list_.size());  // NOTE: NullBasicBlockId is 0.
-    return (block_id == NullBasicBlockId) ? NULL : block_list_[block_id];
+    return (block_id == NullBasicBlockId) ? nullptr : block_list_[block_id];
   }
 
   size_t GetBasicBlockListCount() const {
@@ -647,6 +651,10 @@
    */
   void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);
 
+  bool HasCheckCast() const {
+    return (merged_df_flags_ & DF_CHK_CAST) != 0u;
+  }
+
   bool HasFieldAccess() const {
     return (merged_df_flags_ & (DF_IFIELD | DF_SFIELD)) != 0u;
   }
@@ -691,8 +699,16 @@
   void DoCacheMethodLoweringInfo();
 
   const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) const {
-    DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.size());
-    return method_lowering_infos_[mir->meta.method_lowering_info];
+    return GetMethodLoweringInfo(mir->meta.method_lowering_info);
+  }
+
+  const MirMethodLoweringInfo& GetMethodLoweringInfo(uint32_t lowering_info) const {
+    DCHECK_LT(lowering_info, method_lowering_infos_.size());
+    return method_lowering_infos_[lowering_info];
+  }
+
+  size_t GetMethodLoweringInfoCount() const {
+    return method_lowering_infos_.size();
   }
 
   void ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput);
@@ -1073,7 +1089,9 @@
   bool EliminateNullChecksGate();
   bool EliminateNullChecks(BasicBlock* bb);
   void EliminateNullChecksEnd();
+  void InferTypesStart();
   bool InferTypes(BasicBlock* bb);
+  void InferTypesEnd();
   bool EliminateClassInitChecksGate();
   bool EliminateClassInitChecks(BasicBlock* bb);
   void EliminateClassInitChecksEnd();
@@ -1100,34 +1118,6 @@
     return temp_.gvn.sfield_ids[mir->meta.sfield_lowering_info];
   }
 
-  /*
-   * Type inference handling helpers.  Because Dalvik's bytecode is not fully typed,
-   * we have to do some work to figure out the sreg type.  For some operations it is
-   * clear based on the opcode (i.e. ADD_FLOAT v0, v1, v2), but for others (MOVE), we
-   * may never know the "real" type.
-   *
-   * We perform the type inference operation by using an iterative  walk over
-   * the graph, propagating types "defined" by typed opcodes to uses and defs in
-   * non-typed opcodes (such as MOVE).  The Setxx(index) helpers are used to set defined
-   * types on typed opcodes (such as ADD_INT).  The Setxx(index, is_xx) form is used to
-   * propagate types through non-typed opcodes such as PHI and MOVE.  The is_xx flag
-   * tells whether our guess of the type is based on a previously typed definition.
-   * If so, the defined type takes precedence.  Note that it's possible to have the same sreg
-   * show multiple defined types because dx treats constants as untyped bit patterns.
-   * The return value of the Setxx() helpers says whether or not the Setxx() action changed
-   * the current guess, and is used to know when to terminate the iterative walk.
-   */
-  bool SetFp(int index, bool is_fp);
-  bool SetFp(int index);
-  bool SetCore(int index, bool is_core);
-  bool SetCore(int index);
-  bool SetRef(int index, bool is_ref);
-  bool SetRef(int index);
-  bool SetWide(int index, bool is_wide);
-  bool SetWide(int index);
-  bool SetHigh(int index, bool is_high);
-  bool SetHigh(int index);
-
   bool PuntToInterpreter() {
     return punt_to_interpreter_;
   }
@@ -1252,7 +1242,6 @@
   static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
 
   void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
-  bool InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed);
 
  protected:
   int FindCommonParent(int block1, int block2);
@@ -1399,6 +1388,7 @@
       ArenaBitVector* work_live_vregs;
       ArenaBitVector** def_block_matrix;  // num_vregs x num_blocks_.
       ArenaBitVector** phi_node_blocks;  // num_vregs x num_blocks_.
+      TypeInference* ti;
     } ssa;
     // Global value numbering.
     struct {
@@ -1458,6 +1448,7 @@
   friend class GvnDeadCodeEliminationTest;
   friend class LocalValueNumberingTest;
   friend class TopologicalSortOrderTest;
+  friend class TypeInferenceTest;
   friend class QuickCFITest;
 };
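
The removed fp_use/fp_def arrays and the SetFp/SetCore/SetRef helper family both served the old per-sreg iterative type walk; that state now lives in the new TypeInference class. A sketch of the fixed-point idea the deleted comment block described (names here are illustrative, not the new class's API):

    bool changed = true;
    while (changed) {  // Stop once a full pass changes no type guess.
      changed = false;
      for (MIR* mir : AllMirsInGraph()) {   // hypothetical traversal helper
        // Typed opcodes (ADD_FLOAT, ...) pin operand types; untyped ones
        // (MOVE, PHI, CONST) propagate guesses from neighboring defs/uses.
        changed |= PropagateTypesOf(mir);   // hypothetical; true if a guess changed
      }
    }
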
 
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index 7230c46..946c74b 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -88,7 +88,7 @@
   // The type index of the class declaring the method, 0 if unresolved.
   uint16_t declaring_class_idx_;
   // The dex file that defines the class containing the method and the method,
-  // nullptr if unresolved.
+  // null if unresolved.
   const DexFile* declaring_dex_file_;
 };
 
@@ -223,7 +223,7 @@
   uintptr_t direct_code_;
   uintptr_t direct_method_;
   // Before Resolve(), target_dex_file_ and target_method_idx_ hold the verification-based
-  // devirtualized invoke target if available, nullptr and 0u otherwise.
+  // devirtualized invoke target if available, null and 0u otherwise.
   // After Resolve() they hold the actual target method that will be called; it will be either
   // a devirtualized target method or the compilation's unit's dex file and MethodIndex().
   const DexFile* target_dex_file_;
@@ -232,6 +232,7 @@
   int stats_flags_;
 
   friend class MirOptimizationTest;
+  friend class TypeInferenceTest;
 };
 
 }  // namespace art
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 9d7b4b4..467c14e 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -25,6 +25,7 @@
 #include "gvn_dead_code_elimination.h"
 #include "local_value_numbering.h"
 #include "mir_field_info.h"
+#include "type_inference.h"
 #include "quick/dex_file_method_inliner.h"
 #include "quick/dex_file_to_method_inliner_map.h"
 #include "stack.h"
@@ -54,7 +55,7 @@
 void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
   MIR* mir;
 
-  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     // Skip pass if BB has MIR without SSA representation.
     if (mir->ssa_rep == nullptr) {
        return;
@@ -115,11 +116,11 @@
 /* Advance to next strictly dominated MIR node in an extended basic block */
 MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
   BasicBlock* bb = *p_bb;
-  if (mir != NULL) {
+  if (mir != nullptr) {
     mir = mir->next;
-    while (mir == NULL) {
+    while (mir == nullptr) {
       bb = GetBasicBlock(bb->fall_through);
-      if ((bb == NULL) || Predecessors(bb) != 1) {
+      if ((bb == nullptr) || Predecessors(bb) != 1) {
         // mir is null and we cannot proceed further.
         break;
       } else {
@@ -133,7 +134,7 @@
 
 /*
  * To be used at an invoke mir.  If the logically next mir node represents
- * a move-result, return it.  Else, return NULL.  If a move-result exists,
+ * a move-result, return it.  Else, return nullptr.  If a move-result exists,
  * it is required to immediately follow the invoke with no intervening
  * opcodes or incoming arcs.  However, if the result of the invoke is not
  * used, a move-result may not be present.
@@ -141,7 +142,7 @@
 MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
   BasicBlock* tbb = bb;
   mir = AdvanceMIR(&tbb, mir);
-  while (mir != NULL) {
+  while (mir != nullptr) {
     if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
         (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
         (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
@@ -151,7 +152,7 @@
     if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
       mir = AdvanceMIR(&tbb, mir);
     } else {
-      mir = NULL;
+      mir = nullptr;
     }
   }
   return mir;
@@ -159,29 +160,29 @@
 
 BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
   if (bb->block_type == kDead) {
-    return NULL;
+    return nullptr;
   }
   DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
       || (bb->block_type == kExitBlock));
   BasicBlock* bb_taken = GetBasicBlock(bb->taken);
   BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
-  if (((bb_fall_through == NULL) && (bb_taken != NULL)) &&
+  if (((bb_fall_through == nullptr) && (bb_taken != nullptr)) &&
       ((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
     // Follow simple unconditional branches.
     bb = bb_taken;
   } else {
     // Follow simple fallthrough
-    bb = (bb_taken != NULL) ? NULL : bb_fall_through;
+    bb = (bb_taken != nullptr) ? nullptr : bb_fall_through;
   }
-  if (bb == NULL || (Predecessors(bb) != 1)) {
-    return NULL;
+  if (bb == nullptr || (Predecessors(bb) != 1)) {
+    return nullptr;
   }
   DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
   return bb;
 }
 
 static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
       for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
         if (mir->ssa_rep->uses[i] == ssa_name) {
@@ -190,11 +191,11 @@
       }
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 static SelectInstructionKind SelectKind(MIR* mir) {
-  // Work with the case when mir is nullptr.
+  // Work with the case when mir is null.
   if (mir == nullptr) {
     return kSelectNone;
   }
@@ -255,7 +256,8 @@
   }
 
   // Calculate remaining ME temps available.
-  size_t remaining_me_temps = max_available_non_special_compiler_temps_ - reserved_temps_for_backend_;
+  size_t remaining_me_temps = max_available_non_special_compiler_temps_ -
+      reserved_temps_for_backend_;
 
   if (num_non_special_compiler_temps_ >= remaining_me_temps) {
     return 0;
@@ -346,7 +348,8 @@
     size_t available_temps = GetNumAvailableVRTemps();
     if (available_temps <= 0 || (available_temps <= 1 && wide)) {
       if (verbose) {
-        LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str << " are available.";
+        LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str
+            << " are available.";
       }
       return nullptr;
     }
@@ -364,8 +367,8 @@
   compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
 
   if (verbose) {
-    LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v" << compiler_temp->v_reg
-        << " and s" << compiler_temp->s_reg_low << " has been created.";
+    LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v"
+        << compiler_temp->v_reg << " and s" << compiler_temp->s_reg_low << " has been created.";
   }
 
   if (wide) {
@@ -477,8 +480,8 @@
     local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id,
                                                                  allocator.get()));
   }
-  while (bb != NULL) {
-    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  while (bb != nullptr) {
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
       // TUNING: use the returned value number for CSE.
       if (use_lvn) {
         local_valnum->GetValueNumber(mir);
@@ -537,7 +540,7 @@
             // Bitcode doesn't allow this optimization.
             break;
           }
-          if (mir->next != NULL) {
+          if (mir->next != nullptr) {
             MIR* mir_next = mir->next;
             // Make sure result of cmp is used by next insn and nowhere else
             if (IsInstructionIfCcZ(mir_next->dalvikInsn.opcode) &&
@@ -574,7 +577,6 @@
               // Copy the SSA information that is relevant.
               mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
               mir_next->ssa_rep->uses = mir->ssa_rep->uses;
-              mir_next->ssa_rep->fp_use = mir->ssa_rep->fp_use;
               mir_next->ssa_rep->num_defs = 0;
               mir->ssa_rep->num_uses = 0;
               mir->ssa_rep->num_defs = 0;
@@ -594,12 +596,12 @@
            cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) &&
           IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
         BasicBlock* ft = GetBasicBlock(bb->fall_through);
-        DCHECK(ft != NULL);
+        DCHECK(ft != nullptr);
         BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
         BasicBlock* ft_tk = GetBasicBlock(ft->taken);
 
         BasicBlock* tk = GetBasicBlock(bb->taken);
-        DCHECK(tk != NULL);
+        DCHECK(tk != nullptr);
         BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
         BasicBlock* tk_tk = GetBasicBlock(tk->taken);
 
@@ -608,7 +610,7 @@
         * transfers to the rejoin block and the fall_through edge goes to a block that
          * unconditionally falls through to the rejoin block.
          */
-        if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
+        if ((tk_ft == nullptr) && (ft_tk == nullptr) && (tk_tk == ft_ft) &&
             (Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
           /*
            * Okay - we have the basic diamond shape.
@@ -628,7 +630,7 @@
             MIR* if_false = ft->first_mir_insn;
             // It's possible that the target of the select isn't used - skip those (rare) cases.
             MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
-            if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
+            if ((phi != nullptr) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
               /*
                * We'll convert the IF_EQZ/IF_NEZ to a SELECT.  We need to find the
                * Phi node in the merge block and delete it (while using the SSA name
@@ -668,16 +670,7 @@
                 mir->ssa_rep->uses = src_ssa;
                 mir->ssa_rep->num_uses = 3;
               }
-              mir->ssa_rep->num_defs = 1;
-              mir->ssa_rep->defs = arena_->AllocArray<int32_t>(1, kArenaAllocDFInfo);
-              mir->ssa_rep->fp_def = arena_->AllocArray<bool>(1, kArenaAllocDFInfo);
-              mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
-              // Match type of uses to def.
-              mir->ssa_rep->fp_use = arena_->AllocArray<bool>(mir->ssa_rep->num_uses,
-                                                              kArenaAllocDFInfo);
-              for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
-                mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
-              }
+              AllocateSSADefData(mir, 1);
               /*
                * There is usually a Phi node in the join block for our two cases.  If the
                * Phi node only contains our two cases as input, we will use the result
@@ -721,7 +714,8 @@
         }
       }
     }
-    bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) : NULL;
+    bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) :
+        nullptr;
   }
   if (use_lvn && UNLIKELY(!global_valnum->Good())) {
     LOG(WARNING) << "LVN overflow in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
@@ -732,9 +726,9 @@
 
 /* Collect stats on number of checks removed */
 void MIRGraph::CountChecks(class BasicBlock* bb) {
-  if (bb->data_flow_info != NULL) {
-    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-      if (mir->ssa_rep == NULL) {
+  if (bb->data_flow_info != nullptr) {
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+      if (mir->ssa_rep == nullptr) {
         continue;
       }
       uint64_t df_attributes = GetDataFlowAttributes(mir);
@@ -935,7 +929,7 @@
   // reset MIR_MARK
   AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
       mir->optimization_flags &= ~MIR_MARK;
     }
   }
@@ -1010,7 +1004,7 @@
   // no intervening uses.
 
  // Walk through the instructions in the block, updating as necessary.
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     uint64_t df_attributes = GetDataFlowAttributes(mir);
 
     if ((df_attributes & DF_NULL_TRANSFER_N) != 0u) {
@@ -1121,7 +1115,7 @@
   // converge MIR_MARK with MIR_IGNORE_NULL_CHECK
   AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
       constexpr int kMarkToIgnoreNullCheckShift = kMIRMark - kMIRIgnoreNullCheck;
       static_assert(kMarkToIgnoreNullCheckShift > 0, "Not a valid right-shift");
       uint16_t mirMarkAdjustedToIgnoreNullCheck =
@@ -1131,23 +1125,26 @@
   }
 }
 
+void MIRGraph::InferTypesStart() {
+  DCHECK(temp_scoped_alloc_ != nullptr);
+  temp_.ssa.ti = new (temp_scoped_alloc_.get()) TypeInference(this, temp_scoped_alloc_.get());
+}
+
 /*
  * Perform type and size inference for a basic block.
  */
 bool MIRGraph::InferTypes(BasicBlock* bb) {
   if (bb->data_flow_info == nullptr) return false;
 
-  bool infer_changed = false;
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    if (mir->ssa_rep == NULL) {
-        continue;
-    }
+  DCHECK(temp_.ssa.ti != nullptr);
+  return temp_.ssa.ti->Apply(bb);
+}
 
-    // Propagate type info.
-    infer_changed = InferTypeAndSize(bb, mir, infer_changed);
-  }
-
-  return infer_changed;
+void MIRGraph::InferTypesEnd() {
+  DCHECK(temp_.ssa.ti != nullptr);
+  temp_.ssa.ti->Finish();
+  delete temp_.ssa.ti;
+  temp_.ssa.ti = nullptr;
 }
 
 bool MIRGraph::EliminateClassInitChecksGate() {
@@ -1509,7 +1506,7 @@
   if (bb->block_type != kDalvikByteCode) {
     return;
   }
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
       continue;
     }
@@ -1540,7 +1537,8 @@
             ->GenInline(this, bb, mir, target.dex_method_index)) {
       if (cu_->verbose || cu_->print_pass) {
         LOG(INFO) << "SpecialMethodInliner: Inlined " << method_info.GetInvokeType() << " ("
-            << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index, *target.dex_file)
+            << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index,
+                                                            *target.dex_file)
             << "\" from \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
             << "\" @0x" << std::hex << mir->offset;
       }
@@ -1564,7 +1562,7 @@
       static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
   checkstats_ = stats;
   AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
     CountChecks(bb);
   }
   if (stats->null_checks > 0) {
@@ -1597,7 +1595,7 @@
   bool terminated_by_return = false;
   bool do_local_value_numbering = false;
   // Visit blocks strictly dominated by this head.
-  while (bb != NULL) {
+  while (bb != nullptr) {
     bb->visited = true;
     terminated_by_return |= bb->terminated_by_return;
     do_local_value_numbering |= bb->use_lvn;
@@ -1606,7 +1604,7 @@
   if (terminated_by_return || do_local_value_numbering) {
     // Do lvn for all blocks in this extended set.
     bb = start_bb;
-    while (bb != NULL) {
+    while (bb != nullptr) {
       bb->use_lvn = do_local_value_numbering;
       bb->dominates_return = terminated_by_return;
       bb = NextDominatedBlock(bb);
@@ -1629,7 +1627,7 @@
   if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
     ClearAllVisitedFlags();
     PreOrderDfsIterator iter2(this);
-    for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+    for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
       BuildExtendedBBList(bb);
     }
     // Perform extended basic block optimizations.
@@ -1638,7 +1636,7 @@
     }
   } else {
     PreOrderDfsIterator iter(this);
-    for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
       BasicBlockOpt(bb);
     }
   }
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
index 671bcec..8762b53 100644
--- a/compiler/dex/pass_driver.h
+++ b/compiler/dex/pass_driver.h
@@ -68,7 +68,7 @@
    * @return whether the pass was applied.
    */
   virtual bool RunPass(const char* pass_name) {
-    // Paranoid: c_unit cannot be nullptr and we need a pass name.
+    // Paranoid: c_unit cannot be null and we need a pass name.
     DCHECK(pass_name != nullptr);
     DCHECK_NE(pass_name[0], 0);
 
diff --git a/compiler/dex/pass_driver_me.h b/compiler/dex/pass_driver_me.h
index 94eef22..cbe4a02 100644
--- a/compiler/dex/pass_driver_me.h
+++ b/compiler/dex/pass_driver_me.h
@@ -88,7 +88,7 @@
   }
 
   bool RunPass(const Pass* pass, bool time_split) OVERRIDE {
-    // Paranoid: c_unit and pass cannot be nullptr, and the pass should have a name
+    // Paranoid: c_unit and pass cannot be null, and the pass should have a name.
     DCHECK(pass != nullptr);
     DCHECK(pass->GetName() != nullptr && pass->GetName()[0] != 0);
     CompilationUnit* c_unit = pass_me_data_holder_.c_unit;
@@ -211,8 +211,9 @@
    * @param settings_to_fill Fills the options to contain the mapping of name of option to the new
    * configuration.
    */
-  static void FillOverriddenPassSettings(const PassManagerOptions* options, const char* pass_name,
-                                         SafeMap<const std::string, const OptionContent>& settings_to_fill) {
+  static void FillOverriddenPassSettings(
+      const PassManagerOptions* options, const char* pass_name,
+      SafeMap<const std::string, const OptionContent>& settings_to_fill) {
     const std::string& settings = options->GetOverriddenPassOptions();
     const size_t settings_len = settings.size();
 
diff --git a/compiler/dex/pass_driver_me_post_opt.cc b/compiler/dex/pass_driver_me_post_opt.cc
index a8b8a54..b35bc3d 100644
--- a/compiler/dex/pass_driver_me_post_opt.cc
+++ b/compiler/dex/pass_driver_me_post_opt.cc
@@ -41,7 +41,7 @@
   pass_manager->AddPass(new SSAConversion);
   pass_manager->AddPass(new PhiNodeOperands);
   pass_manager->AddPass(new PerformInitRegLocations);
-  pass_manager->AddPass(new TypeInference);
+  pass_manager->AddPass(new TypeInferencePass);
   pass_manager->AddPass(new FinishSSATransformation);
 }
 
diff --git a/compiler/dex/post_opt_passes.h b/compiler/dex/post_opt_passes.h
index 1ab8625..e9fa0eb 100644
--- a/compiler/dex/post_opt_passes.h
+++ b/compiler/dex/post_opt_passes.h
@@ -263,12 +263,19 @@
 };
 
 /**
- * @class TypeInference
+ * @class TypeInferencePass
  * @brief Type inference pass.
  */
-class TypeInference : public PassMEMirSsaRep {
+class TypeInferencePass : public PassMEMirSsaRep {
  public:
-  TypeInference() : PassMEMirSsaRep("TypeInference", kRepeatingPreOrderDFSTraversal) {
+  TypeInferencePass() : PassMEMirSsaRep("TypeInference", kRepeatingPreOrderDFSTraversal) {
+  }
+
+  void Start(PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph->InferTypesStart();
   }
 
   bool Worker(PassDataHolder* data) const {
@@ -280,6 +287,13 @@
     DCHECK(bb != nullptr);
     return c_unit->mir_graph->InferTypes(bb);
   }
+
+  void End(PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph.get()->InferTypesEnd();
+  }
 };
 
 /**
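
Roughly how the pass driver exercises the three hooks added above (simplified sketch; kRepeatingPreOrderDFSTraversal makes the Worker loop repeat until a quiet pass):

    pass->Start(data);                        // MIRGraph::InferTypesStart(): allocate TypeInference.
    bool change = true;
    while (change) {
      change = false;
      for (BasicBlock* bb : pre_order_dfs) {  // simplified traversal
        change |= pass->Worker(DataFor(bb));  // InferTypes(bb) -> TypeInference::Apply(bb).
      }
    }
    pass->End(data);                          // MIRGraph::InferTypesEnd(): Finish(), then tear down.
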
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index c5ac4c1..df4a9f2 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1055,7 +1055,7 @@
 // new_lir replaces orig_lir in the pcrel_fixup list.
 void ArmMir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
   new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
-  if (UNLIKELY(prev_lir == NULL)) {
+  if (UNLIKELY(prev_lir == nullptr)) {
     first_fixup_ = new_lir;
   } else {
     prev_lir->u.a.pcrel_next = new_lir;
@@ -1066,7 +1066,7 @@
 // new_lir is inserted before orig_lir in the pcrel_fixup list.
 void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
   new_lir->u.a.pcrel_next = orig_lir;
-  if (UNLIKELY(prev_lir == NULL)) {
+  if (UNLIKELY(prev_lir == nullptr)) {
     first_fixup_ = new_lir;
   } else {
     DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -1084,7 +1084,7 @@
 
 uint8_t* ArmMir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
   uint8_t* const write_buffer = write_pos;
-  for (; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (; lir != nullptr; lir = NEXT_LIR(lir)) {
     lir->offset = (write_pos - write_buffer);
     if (!lir->flags.is_nop) {
       int opcode = lir->opcode;
@@ -1258,8 +1258,8 @@
     generation ^= 1;
    // Note: nodes requiring possible fixup linked in ascending order.
     lir = first_fixup_;
-    prev_lir = NULL;
-    while (lir != NULL) {
+    prev_lir = nullptr;
+    while (lir != nullptr) {
       /*
        * NOTE: the lir being considered here will be encoded following the switch (so long as
        * we're not in a retry situation).  However, any new non-pc_rel instructions inserted
@@ -1506,7 +1506,7 @@
         case kFixupAdr: {
           const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[2]);
           LIR* target = lir->target;
-          int32_t target_disp = (tab_rec != NULL) ?  tab_rec->offset + offset_adjustment
+          int32_t target_disp = (tab_rec != nullptr) ?  tab_rec->offset + offset_adjustment
               : target->offset + ((target->flags.generation == lir->flags.generation) ? 0 :
               offset_adjustment);
           int32_t disp = target_disp - ((lir->offset + 4) & ~3);
@@ -1642,7 +1642,7 @@
 uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
   LIR* end_lir = tail_lir->next;
 
-  LIR* last_fixup = NULL;
+  LIR* last_fixup = nullptr;
   for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
     if (!lir->flags.is_nop) {
       if (lir->flags.fixup != kFixupNone) {
@@ -1658,8 +1658,8 @@
         }
         // Link into the fixup chain.
         lir->flags.use_def_invalid = true;
-        lir->u.a.pcrel_next = NULL;
-        if (first_fixup_ == NULL) {
+        lir->u.a.pcrel_next = nullptr;
+        if (first_fixup_ == nullptr) {
           first_fixup_ = lir;
         } else {
           last_fixup->u.a.pcrel_next = lir;
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 3d18af6..6ba4016 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -124,7 +124,7 @@
   }
   // Bounds check - if < 0 or >= size continue following switch
   OpRegImm(kOpCmp, keyReg, size-1);
-  LIR* branch_over = OpCondBranch(kCondHi, NULL);
+  LIR* branch_over = OpCondBranch(kCondHi, nullptr);
 
   // Load the displacement from the switch table
   RegStorage disp_reg = AllocTemp();
@@ -156,7 +156,7 @@
     } else {
      // If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
       if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
       }
     }
     Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
@@ -165,12 +165,12 @@
     MarkPossibleNullPointerException(opt_flags);
     // Zero out the read barrier bits.
     OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
-    LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, NULL);
+    LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, nullptr);
     // r1 is zero except for the rb bits here. Copy the read barrier bits into r2.
     OpRegRegReg(kOpOr, rs_r2, rs_r2, rs_r1);
     NewLIR4(kThumb2Strex, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
         mirror::Object::MonitorOffset().Int32Value() >> 2);
-    LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);
+    LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, nullptr);
 
 
     LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
@@ -238,7 +238,7 @@
     } else {
      // If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
       if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
       }
     }
     if (!kUseReadBarrier) {
@@ -252,16 +252,16 @@
     OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
     // Zero out except the read barrier bits.
     OpRegRegImm(kOpAnd, rs_r1, rs_r1, LockWord::kReadBarrierStateMaskShifted);
-    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, NULL);
+    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, nullptr);
     GenMemBarrier(kAnyStore);
     LIR* unlock_success_branch;
     if (!kUseReadBarrier) {
       Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
-      unlock_success_branch = OpUnconditionalBranch(NULL);
+      unlock_success_branch = OpUnconditionalBranch(nullptr);
     } else {
       NewLIR4(kThumb2Strex, rs_r2.GetReg(), rs_r1.GetReg(), rs_r0.GetReg(),
               mirror::Object::MonitorOffset().Int32Value() >> 2);
-      unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, NULL);
+      unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, nullptr);
     }
     LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
     slow_unlock_branch->target = slow_path_target;
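
For context, the unlock fast path generated above compares the lock word (with the read-barrier bits masked) against the thread's thin-lock id and then either stores zero directly or retries via strex. A rough C++ analogue under a simplified 32-bit lock word with no read-barrier bits (an assumption; not the real runtime layout):

    #include <atomic>
    #include <cstdint>

    bool TryThinUnlock(std::atomic<uint32_t>& lock_word, uint32_t thin_lock_id) {
      uint32_t expected = thin_lock_id;  // Simplification: lock held exactly once.
      // The GenMemBarrier(kAnyStore) before the store corresponds to release
      // ordering here; on failure the caller falls through to the slow path.
      return lock_word.compare_exchange_strong(expected, 0u,
                                               std::memory_order_release);
    }
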
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index eb1383f..94fc474 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -187,7 +187,8 @@
       return;
     }
     case Instruction::FLOAT_TO_LONG:
-      GenConversionCall(kQuickF2l, rl_dest, rl_src);
+      CheckEntrypointTypes<kQuickF2l, int64_t, float>();  // int64_t -> kCoreReg
+      GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
       return;
     case Instruction::LONG_TO_FLOAT: {
       rl_src = LoadValueWide(rl_src, kFPReg);
@@ -217,7 +218,8 @@
       return;
     }
     case Instruction::DOUBLE_TO_LONG:
-      GenConversionCall(kQuickD2l, rl_dest, rl_src);
+      CheckEntrypointTypes<kQuickD2l, int64_t, double>();  // int64_t -> kCoreReg
+      GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
       return;
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
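
The new CheckEntrypointTypes<kQuickF2l, int64_t, float>() call documents, at compile time, that the F2L entrypoint returns int64_t, which is why the conversion result must now be fetched from core registers (kCoreReg) even though the operand is floating point. A sketch of how such a check can be built (hypothetical traits table, not the actual ART implementation):

    #include <cstdint>
    #include <type_traits>

    enum QuickEntrypointEnum { kQuickF2l, kQuickD2l };

    template <QuickEntrypointEnum E> struct EntrypointSig;  // One entry per entrypoint.
    template <> struct EntrypointSig<kQuickF2l> { using type = int64_t (*)(float); };
    template <> struct EntrypointSig<kQuickD2l> { using type = int64_t (*)(double); };

    template <QuickEntrypointEnum E, typename Ret, typename... Args>
    void CheckEntrypointTypes() {
      static_assert(std::is_same<typename EntrypointSig<E>::type, Ret (*)(Args...)>::value,
                    "entrypoint signature mismatch");
    }
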
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 47669db..8d20f1b 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -138,10 +138,10 @@
   RegStorage t_reg = AllocTemp();
   LoadConstant(t_reg, -1);
   OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
-  LIR* branch1 = OpCondBranch(kCondLt, NULL);
-  LIR* branch2 = OpCondBranch(kCondGt, NULL);
+  LIR* branch1 = OpCondBranch(kCondLt, nullptr);
+  LIR* branch2 = OpCondBranch(kCondGt, nullptr);
   OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
-  LIR* branch3 = OpCondBranch(kCondEq, NULL);
+  LIR* branch3 = OpCondBranch(kCondEq, nullptr);
 
   LIR* it = OpIT(kCondHi, "E");
   NewLIR2(kThumb2MovI8M, t_reg.GetReg(), ModifiedImmediate(-1));
@@ -389,7 +389,7 @@
    * generate the long form in an attempt to avoid an extra assembly pass.
    * TODO: consider interspersing slowpaths in code following unconditional branches.
    */
-  bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
+  bool skip = ((target != nullptr) && (target->opcode == kPseudoThrowTarget));
   skip &= ((mir_graph_->GetNumDalvikInsns() - current_dalvik_offset_) > 64);
   if (!skip && reg.Low8() && (check_value == 0)) {
     if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
@@ -882,7 +882,7 @@
   RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
   RegLocation rl_new_value;
   if (!is_long) {
-    rl_new_value = LoadValue(rl_src_new_value, LocToRegClass(rl_src_new_value));
+    rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
   } else if (load_early) {
     rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
   }
@@ -905,7 +905,7 @@
 
   RegLocation rl_expected;
   if (!is_long) {
-    rl_expected = LoadValue(rl_src_expected, LocToRegClass(rl_src_new_value));
+    rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
   } else if (load_early) {
     rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
   } else {
@@ -1159,12 +1159,12 @@
 LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
 #ifdef ARM_R4_SUSPEND_FLAG
   NewLIR2(kThumbSubRI8, rs_rARM_SUSPEND.GetReg(), 1);
-  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
+  return OpCondBranch((target == nullptr) ? kCondEq : kCondNe, target);
 #else
   RegStorage t_reg = AllocTemp();
   LoadBaseDisp(rs_rARM_SELF, Thread::ThreadFlagsOffset<4>().Int32Value(),
     t_reg, kUnsignedHalf, kNotVolatile);
-  LIR* cmp_branch = OpCmpImmBranch((target == NULL) ? kCondNe : kCondEq, t_reg,
+  LIR* cmp_branch = OpCmpImmBranch((target == nullptr) ? kCondNe : kCondEq, t_reg,
     0, target);
   FreeTemp(t_reg);
   return cmp_branch;
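
The is_object ? kRefReg : kCoreReg selection in the compare-and-swap intrinsic replaces LocToRegClass, which trusted the (sometimes imprecise) type bits on the RegLocation; the intrinsic knows statically whether its value is a reference. An illustrative helper capturing the choice:

    enum RegisterClass { kCoreReg, kFPReg, kRefReg };

    RegisterClass CasValueRegClass(bool is_long, bool is_object) {
      if (is_long) {
        return kCoreReg;  // Wide CAS values live in core register pairs here.
      }
      return is_object ? kRefReg : kCoreReg;
    }
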
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 25ea694..2ef92f8 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -90,7 +90,7 @@
     }
   }
   LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
-  if (data_target == NULL) {
+  if (data_target == nullptr) {
     data_target = AddWordData(&literal_list_, value);
   }
   ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -411,7 +411,7 @@
     return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
   } else {
     LOG(FATAL) << "Unexpected encoding operand count";
-    return NULL;
+    return nullptr;
   }
 }
 
@@ -695,7 +695,7 @@
 }
 
 LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
-  LIR* res = NULL;
+  LIR* res = nullptr;
   int32_t val_lo = Low32Bits(value);
   int32_t val_hi = High32Bits(value);
   if (r_dest.IsFloat()) {
@@ -721,10 +721,10 @@
       LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
     }
   }
-  if (res == NULL) {
+  if (res == nullptr) {
     // No short form - load from the literal pool.
     LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-    if (data_target == NULL) {
+    if (data_target == nullptr) {
       data_target = AddWideData(&literal_list_, val_lo, val_hi);
     }
     ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -814,7 +814,7 @@
 LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                   int scale, OpSize size) {
   bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8();
-  LIR* store = NULL;
+  LIR* store = nullptr;
   ArmOpcode opcode = kThumbBkpt;
   bool thumb_form = (all_low_regs && (scale == 0));
   RegStorage reg_ptr;
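
The ScanLiteralPool/AddWordData pairing above is a scan-or-add (interning) pattern: reuse an existing literal-pool entry for the constant, otherwise add a new one. A minimal sketch with a hypothetical Literal type (the real pool is arena-allocated LIR data):

    #include <cstdint>

    struct Literal {
      int32_t value;
      Literal* next;
    };

    Literal* ScanOrAdd(Literal*& pool, int32_t value) {
      for (Literal* p = pool; p != nullptr; p = p->next) {
        if (p->value == value) {
          return p;                     // Deduplicate: reuse the existing entry.
        }
      }
      pool = new Literal{value, pool};  // Otherwise prepend a new entry.
      return pool;
    }
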
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 2f1ae66..b78fb80 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -663,7 +663,7 @@
 // new_lir replaces orig_lir in the pcrel_fixup list.
 void Arm64Mir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
   new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
-  if (UNLIKELY(prev_lir == NULL)) {
+  if (UNLIKELY(prev_lir == nullptr)) {
     first_fixup_ = new_lir;
   } else {
     prev_lir->u.a.pcrel_next = new_lir;
@@ -674,7 +674,7 @@
 // new_lir is inserted before orig_lir in the pcrel_fixup list.
 void Arm64Mir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
   new_lir->u.a.pcrel_next = orig_lir;
-  if (UNLIKELY(prev_lir == NULL)) {
+  if (UNLIKELY(prev_lir == nullptr)) {
     first_fixup_ = new_lir;
   } else {
     DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -889,8 +889,8 @@
     generation ^= 1;
     // Note: nodes requiring possible fixup linked in ascending order.
     lir = first_fixup_;
-    prev_lir = NULL;
-    while (lir != NULL) {
+    prev_lir = nullptr;
+    while (lir != nullptr) {
       // NOTE: Any new non-pc_rel instructions inserted due to retry must be explicitly encoded at
       // the time of insertion.  Note that inserted instructions don't need use/def flags, but do
       // need size and pc-rel status properly updated.
@@ -1037,7 +1037,7 @@
               // Check that the instruction preceding the multiply-accumulate is a load or store.
               if ((prev_insn_flags & IS_LOAD) != 0 || (prev_insn_flags & IS_STORE) != 0) {
                // Insert a NOP between the load/store and the multiply-accumulate.
-                LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, NULL);
+                LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, nullptr);
                 new_lir->offset = lir->offset;
                 new_lir->flags.fixup = kFixupNone;
                 new_lir->flags.size = EncodingMap[kA64Nop0].size;
@@ -1108,7 +1108,7 @@
 uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
   LIR* end_lir = tail_lir->next;
 
-  LIR* last_fixup = NULL;
+  LIR* last_fixup = nullptr;
   for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
     A64Opcode opcode = UNWIDE(lir->opcode);
     if (!lir->flags.is_nop) {
@@ -1123,8 +1123,8 @@
         }
         // Link into the fixup chain.
         lir->flags.use_def_invalid = true;
-        lir->u.a.pcrel_next = NULL;
-        if (first_fixup_ == NULL) {
+        lir->u.a.pcrel_next = nullptr;
+        if (first_fixup_ == nullptr) {
           first_fixup_ = lir;
         } else {
           last_fixup->u.a.pcrel_next = lir;
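
The NOP insertion above, between a load/store and a following multiply-accumulate, looks like a CPU-erratum workaround; the new instruction is spliced into the doubly-linked LIR list and the pass retries. A sketch of the splice with a hypothetical Insn type (the real code goes through RawLIR and encodes the NOP explicitly):

    struct Insn {
      Insn* prev;
      Insn* next;
    };

    // Splice nop in front of pos; assumes pos is not the list head
    // (the real list also maintains a head pointer).
    void InsertBefore(Insn* pos, Insn* nop) {
      nop->prev = pos->prev;
      nop->next = pos;
      pos->prev->next = nop;
      pos->prev = nop;
    }
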
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 4abbd77..9a7c2ad 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -127,7 +127,7 @@
   }
   // Bounds check - if < 0 or >= size continue following switch
   OpRegImm(kOpCmp, key_reg, size - 1);
-  LIR* branch_over = OpCondBranch(kCondHi, NULL);
+  LIR* branch_over = OpCondBranch(kCondHi, nullptr);
 
   // Load the displacement from the switch table
   RegStorage disp_reg = AllocTemp();
@@ -167,7 +167,7 @@
   } else {
     // If the null-check fails, it's handled by the slow-path to reduce exception-related meta-data.
     if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
     }
   }
   Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -176,12 +176,12 @@
   MarkPossibleNullPointerException(opt_flags);
   // Zero out the read barrier bits.
   OpRegRegImm(kOpAnd, rs_w2, rs_w3, LockWord::kReadBarrierStateMaskShiftedToggled);
-  LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, NULL);
+  LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, nullptr);
   // w3 is zero except for the rb bits here. Copy the read barrier bits into w1.
   OpRegRegReg(kOpOr, rs_w1, rs_w1, rs_w3);
   OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
   NewLIR3(kA64Stxr3wrX, rw3, rw1, rx2);
-  LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, NULL);
+  LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, nullptr);
 
   LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
   not_unlocked_branch->target = slow_path_target;
@@ -220,7 +220,7 @@
   } else {
     // If the null-check fails, it's handled by the slow-path to reduce exception-related meta-data.
     if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
     }
   }
   Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -235,16 +235,16 @@
   OpRegRegImm(kOpAnd, rs_w3, rs_w2, LockWord::kReadBarrierStateMaskShiftedToggled);
   // Zero out except the read barrier bits.
   OpRegRegImm(kOpAnd, rs_w2, rs_w2, LockWord::kReadBarrierStateMaskShifted);
-  LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, NULL);
+  LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, nullptr);
   GenMemBarrier(kAnyStore);
   LIR* unlock_success_branch;
   if (!kUseReadBarrier) {
     Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_w2);
-    unlock_success_branch = OpUnconditionalBranch(NULL);
+    unlock_success_branch = OpUnconditionalBranch(nullptr);
   } else {
     OpRegRegImm(kOpAdd, rs_x3, rs_x0, mirror::Object::MonitorOffset().Int32Value());
     NewLIR3(kA64Stxr3wrX, rw1, rw2, rx3);
-    unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, NULL);
+    unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, nullptr);
   }
   LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
   slow_unlock_branch->target = slow_path_target;
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index b7dbd0a..9340d01 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -803,7 +803,7 @@
   NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
   OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
   DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-  LIR* early_exit = OpCondBranch(kCondNe, NULL);
+  LIR* early_exit = OpCondBranch(kCondNe, nullptr);
   NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
   NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
   DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
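
The ldaxr/stlxr sequence above, with an early exit when the comparison fails, is the load-linked/store-conditional form of compare-and-swap. A loose C++ analogue (weak CAS, which may fail spuriously just as a store-exclusive can):

    #include <atomic>
    #include <cstdint>

    bool CasOnce(std::atomic<uint64_t>& word, uint64_t expected, uint64_t desired) {
      // kA64Ldaxr2rX ~ load-acquire exclusive, kA64Stlxr3wrX ~ store-release
      // exclusive; the early_exit branch maps to the failure return here.
      return word.compare_exchange_weak(expected, desired,
                                        std::memory_order_acq_rel,
                                        std::memory_order_acquire);
    }
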
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index e9ad8ba..483231f 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -121,7 +121,7 @@
   }
 
   LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
-  if (data_target == NULL) {
+  if (data_target == nullptr) {
     // Wide, as we need 8B alignment.
     data_target = AddWideData(&literal_list_, value, 0);
   }
@@ -148,7 +148,7 @@
   int32_t val_lo = Low32Bits(value);
   int32_t val_hi = High32Bits(value);
   LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-  if (data_target == NULL) {
+  if (data_target == nullptr) {
     data_target = AddWideData(&literal_list_, val_lo, val_hi);
   }
 
@@ -525,7 +525,7 @@
   int32_t val_lo = Low32Bits(value);
   int32_t val_hi = High32Bits(value);
   LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-  if (data_target == NULL) {
+  if (data_target == nullptr) {
     data_target = AddWideData(&literal_list_, val_lo, val_hi);
   }
 
@@ -624,7 +624,7 @@
   }
 
   LOG(FATAL) << "Unexpected encoding operand count";
-  return NULL;
+  return nullptr;
 }
 
 LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
@@ -658,7 +658,7 @@
   }
 
   LOG(FATAL) << "Unexpected encoding operand count";
-  return NULL;
+  return nullptr;
 }
 
 LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
@@ -1190,7 +1190,7 @@
  */
 LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                     OpSize size) {
-  LIR* load = NULL;
+  LIR* load = nullptr;
   A64Opcode opcode = kA64Brk1d;
   A64Opcode alt_opcode = kA64Brk1d;
   int scale = 0;
@@ -1286,7 +1286,7 @@
 
 LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                      OpSize size) {
-  LIR* store = NULL;
+  LIR* store = nullptr;
   A64Opcode opcode = kA64Brk1d;
   A64Opcode alt_opcode = kA64Brk1d;
   int scale = 0;
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 9f4a318..fb68335 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1080,7 +1080,7 @@
   reginfo_map_.reserve(RegStorage::kMaxRegs);
   pointer_storage_.reserve(128);
   slow_paths_.reserve(32);
-  // Reserve pointer id 0 for nullptr.
+  // Reserve pointer id 0 for null.
   size_t null_idx = WrapPointer<void>(nullptr);
   DCHECK_EQ(null_idx, 0U);
 }
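
Reserving pointer id 0 matters because wrapped pointers travel through 32-bit LIR operands: wrapping nullptr first guarantees that operand value 0 unwraps back to null. A minimal sketch of the scheme (hypothetical free functions; the real table lives in Mir2Lir):

    #include <cstddef>
    #include <vector>

    std::vector<const void*> pointer_storage;

    size_t WrapPointer(const void* p) {
      pointer_storage.push_back(p);
      return pointer_storage.size() - 1;  // First call, with nullptr, yields 0.
    }

    const void* UnwrapPointer(size_t idx) {
      return pointer_storage[idx];
    }
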
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 4ac6c0c..f5e6c09 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -368,9 +368,9 @@
 
 #define UNSAFE_GET_PUT(type, code, type_flags) \
     INTRINSIC(SunMiscUnsafe, Get ## type, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
-              type_flags & ~kIntrinsicFlagIsObject), \
+              type_flags), \
     INTRINSIC(SunMiscUnsafe, Get ## type ## Volatile, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
-              (type_flags | kIntrinsicFlagIsVolatile) & ~kIntrinsicFlagIsObject), \
+              type_flags | kIntrinsicFlagIsVolatile), \
     INTRINSIC(SunMiscUnsafe, Put ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
               type_flags), \
     INTRINSIC(SunMiscUnsafe, Put ## type ## Volatile, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
@@ -392,7 +392,7 @@
 
 DexFileMethodInliner::DexFileMethodInliner()
     : lock_("DexFileMethodInliner lock", kDexFileMethodInlinerLock),
-      dex_file_(NULL) {
+      dex_file_(nullptr) {
   static_assert(kClassCacheFirst == 0, "kClassCacheFirst not 0");
   static_assert(arraysize(kClassCacheNames) == kClassCacheLast,
                 "bad arraysize for kClassCacheNames");
@@ -507,6 +507,7 @@
                                     intrinsic.d.data & kIntrinsicFlagIsObject);
     case kIntrinsicUnsafeGet:
       return backend->GenInlinedUnsafeGet(info, intrinsic.d.data & kIntrinsicFlagIsLong,
+                                          intrinsic.d.data & kIntrinsicFlagIsObject,
                                           intrinsic.d.data & kIntrinsicFlagIsVolatile);
     case kIntrinsicUnsafePut:
       return backend->GenInlinedUnsafePut(info, intrinsic.d.data & kIntrinsicFlagIsLong,
@@ -752,6 +753,7 @@
   insn->dalvikInsn.opcode = Instruction::CONST;
   insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
   insn->dalvikInsn.vB = method.d.data;
+  insn->meta.method_lowering_info = invoke->meta.method_lowering_info;  // Preserve type info.
   bb->InsertMIRAfter(move_result, insn);
   return true;
 }
@@ -790,6 +792,7 @@
   insn->dalvikInsn.opcode = opcode;
   insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
   insn->dalvikInsn.vB = arg;
+  insn->meta.method_lowering_info = invoke->meta.method_lowering_info;  // Preserve type info.
   bb->InsertMIRAfter(move_result, insn);
   return true;
 }
@@ -912,6 +915,7 @@
     }
     move->dalvikInsn.vA = move_result->dalvikInsn.vA;
     move->dalvikInsn.vB = return_reg;
+    move->meta.method_lowering_info = invoke->meta.method_lowering_info;  // Preserve type info.
     bb->InsertMIRAfter(insn, move);
   }
   return true;
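
Two related changes here: kIntrinsicFlagIsObject is no longer masked out of the UnsafeGet data word, so the backend can pick a reference register class for object reads, and MIRs synthesized by the inliner now copy meta.method_lowering_info from the original invoke so the new type-inference pass can still recover the callee's signature. A sketch of the flag decoding (flag values are illustrative, not ART's actual constants):

    #include <cstdint>

    enum : uint32_t {
      kIntrinsicFlagIsLong     = 1u << 0,  // Illustrative bit positions only.
      kIntrinsicFlagIsVolatile = 1u << 1,
      kIntrinsicFlagIsObject   = 1u << 2,
    };

    struct UnsafeGetKind {
      bool is_long;
      bool is_object;
      bool is_volatile;
    };

    UnsafeGetKind DecodeUnsafeGet(uint32_t data) {
      return UnsafeGetKind{(data & kIntrinsicFlagIsLong) != 0,
                           (data & kIntrinsicFlagIsObject) != 0,
                           (data & kIntrinsicFlagIsVolatile) != 0};
    }
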
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index b132c4c..de5e041 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -87,7 +87,7 @@
     const RegStorage r_result_;
   };
 
-  LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, NULL);
+  LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, nullptr);
   LIR* cont = NewLIR0(kPseudoTargetLabel);
 
   AddSlowPath(new (arena_) CallHelperImmMethodSlowPath(this, branch, cont, trampoline, imm,
@@ -113,10 +113,10 @@
     int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
     LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
   }
-  // r_base now points at static storage (Class*) or nullptr if the type is not yet resolved.
+  // r_base now points at static storage (Class*) or null if the type is not yet resolved.
   LIR* unresolved_branch = nullptr;
   if (!field_info.IsClassInDexCache() && (opt_flags & MIR_CLASS_IS_IN_DEX_CACHE) == 0) {
-    // Check if r_base is nullptr.
+    // Check if r_base is null.
     unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, nullptr);
   }
   LIR* uninit_branch = nullptr;
@@ -136,8 +136,8 @@
     class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
      public:
       // There are up to two branches to the static field slow path, the "unresolved" when the type
-      // entry in the dex cache is nullptr, and the "uninit" when the class is not yet initialized.
-      // At least one will be non-nullptr here, otherwise we wouldn't generate the slow path.
+      // entry in the dex cache is null, and the "uninit" when the class is not yet initialized.
+      // At least one will be non-null here, otherwise we wouldn't generate the slow path.
       StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
                           RegStorage r_base_in, RegStorage r_method_in)
           : LIRSlowPath(m2l, unresolved != nullptr ? unresolved : uninit, cont),
@@ -165,7 +165,7 @@
       }
 
      private:
-      // Second branch to the slow path, or nullptr if there's only one branch.
+      // Second branch to the slow path, or null if there's only one branch.
       LIR* const second_branch_;
 
       const int storage_index_;
@@ -173,7 +173,7 @@
       RegStorage r_method_;
     };
 
-    // The slow path is invoked if the r_base is nullptr or the class pointed
+    // The slow path is invoked if the r_base is null or the class pointed
     // to by it is not initialized.
     LIR* cont = NewLIR0(kPseudoTargetLabel);
     AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
@@ -319,7 +319,7 @@
 /* Perform an explicit null-check on a register.  */
 LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
   if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
-    return NULL;
+    return nullptr;
   }
   return GenNullCheck(m_reg);
 }
@@ -1188,7 +1188,7 @@
     DCHECK(!IsSameReg(result_reg, object.reg));
   }
   LoadConstant(result_reg, 0);     // assume false
-  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
 
   RegStorage check_class = AllocTypedTemp(false, kRefReg);
   RegStorage object_class = AllocTypedTemp(false, kRefReg);
@@ -1287,7 +1287,7 @@
     // On MIPS and x86_64 rArg0 != rl_result, place false in result if branch is taken.
     LoadConstant(rl_result.reg, 0);
   }
-  LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
+  LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, nullptr);
 
   /* load object->klass_ */
   RegStorage ref_class_reg = TargetReg(kArg1, kRef);  // kArg1 will hold the Class* of ref.
@@ -1295,7 +1295,7 @@
   LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(),
               ref_class_reg, kNotVolatile);
   /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
-  LIR* branchover = NULL;
+  LIR* branchover = nullptr;
   if (type_known_final) {
     // rl_result == ref == class.
     GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg,
@@ -1320,7 +1320,7 @@
       if (!type_known_abstract) {
         /* Uses branchovers */
         LoadConstant(rl_result.reg, 1);     // assume true
-        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL);
+        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), nullptr);
       }
 
       OpRegCopy(TargetReg(kArg0, kRef), class_reg);    // .ne case - arg0 <= class
@@ -2088,7 +2088,7 @@
 }
 
 void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
-                                RegLocation rl_src) {
+                                RegLocation rl_src, RegisterClass return_reg_class) {
   /*
    * Don't optimize the register usage since it calls out to support
    * functions
@@ -2097,12 +2097,10 @@
   FlushAllRegs();   /* Send everything to home location */
   CallRuntimeHelperRegLocation(trampoline, rl_src, false);
   if (rl_dest.wide) {
-    RegLocation rl_result;
-    rl_result = GetReturnWide(LocToRegClass(rl_dest));
+    RegLocation rl_result = GetReturnWide(return_reg_class);
     StoreValueWide(rl_dest, rl_result);
   } else {
-    RegLocation rl_result;
-    rl_result = GetReturn(LocToRegClass(rl_dest));
+    RegLocation rl_result = GetReturn(return_reg_class);
     StoreValue(rl_dest, rl_result);
   }
 }
@@ -2131,7 +2129,7 @@
   }
   if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
     FlushAllRegs();
-    LIR* branch = OpTestSuspend(NULL);
+    LIR* branch = OpTestSuspend(nullptr);
     LIR* cont = NewLIR0(kPseudoTargetLabel);
     AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
   } else {
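
This is the definition side of the GenConversionCall change seen in the ARM and x86 files: the caller now states the register class in which the runtime call returns its value, instead of the helper guessing via LocToRegClass(rl_dest). A stubbed sketch of the resulting store step:

    enum RegisterClass { kCoreReg, kFPReg, kRefReg };
    struct RegLocation { bool wide; };

    // Stubs standing in for the Mir2Lir members used above.
    RegLocation GetReturn(RegisterClass rc);
    RegLocation GetReturnWide(RegisterClass rc);
    void StoreValue(RegLocation dest, RegLocation src);
    void StoreValueWide(RegLocation dest, RegLocation src);

    void StoreConversionResult(RegLocation rl_dest, RegisterClass return_reg_class) {
      if (rl_dest.wide) {
        StoreValueWide(rl_dest, GetReturnWide(return_reg_class));
      } else {
        StoreValue(rl_dest, GetReturn(return_reg_class));
      }
    }
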
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index db7095d..1eb3a5f 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -882,8 +882,6 @@
         ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
   } else {
     res = info->result;
-    DCHECK_EQ(LocToRegClass(res),
-              ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
   }
   return res;
 }
@@ -896,8 +894,6 @@
         mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
   } else {
     res = info->result;
-    DCHECK_EQ(LocToRegClass(res),
-              ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
   }
   return res;
 }
@@ -1338,7 +1334,7 @@
 }
 
 bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
-                                  bool is_long, bool is_volatile) {
+                                  bool is_long, bool is_object, bool is_volatile) {
   if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
     // TODO: add Mips and Mips64 implementations.
     return false;
@@ -1351,7 +1347,7 @@
 
   RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
   RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, LocToRegClass(rl_dest), true);
+  RegLocation rl_result = EvalLoc(rl_dest, is_object ? kRefReg : kCoreReg, true);
   if (is_long) {
     if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
         || cu_->instruction_set == kArm64) {
@@ -1411,7 +1407,7 @@
       FreeTemp(rl_temp_offset);
     }
   } else {
-    rl_value = LoadValue(rl_src_value, LocToRegClass(rl_src_value));
+    rl_value = LoadValue(rl_src_value, is_object ? kRefReg : kCoreReg);
     if (rl_value.ref) {
       StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
     } else {
@@ -1499,11 +1495,13 @@
   FreeCallTemps();
   if (info->result.location != kLocInvalid) {
     // We have a following MOVE_RESULT - do it now.
+    RegisterClass reg_class =
+        ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]);
     if (info->result.wide) {
-      RegLocation ret_loc = GetReturnWide(LocToRegClass(info->result));
+      RegLocation ret_loc = GetReturnWide(reg_class);
       StoreValueWide(info->result, ret_loc);
     } else {
-      RegLocation ret_loc = GetReturn(LocToRegClass(info->result));
+      RegLocation ret_loc = GetReturn(reg_class);
       StoreValue(info->result, ret_loc);
     }
   }
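
The DCHECKs comparing LocToRegClass against ShortyToRegClass are dropped and the shorty becomes the single source of truth: the method signature's return character decides where the result lives. A plausible version of the mapping, shown for illustration (the exact ART implementation may differ):

    enum RegisterClass { kCoreReg, kFPReg, kRefReg };

    RegisterClass ShortyToRegClassSketch(char shorty_type) {
      switch (shorty_type) {
        case 'F':
        case 'D':
          return kFPReg;   // float / double return in FP registers.
        case 'L':
          return kRefReg;  // Object references.
        default:
          return kCoreReg; // Z, B, S, C, I, J; 'V' never reaches a store.
      }
    }
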
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 54e5742..4215e8b 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -46,7 +46,7 @@
   if (rl_src.location == kLocPhysReg) {
     OpRegCopy(r_dest, rl_src.reg);
   } else if (IsInexpensiveConstant(rl_src)) {
-    // On 64-bit targets, will sign extend.  Make sure constant reference is always NULL.
+    // On 64-bit targets, will sign extend.  Make sure constant reference is always null.
     DCHECK(!rl_src.ref || (mir_graph_->ConstantValue(rl_src) == 0));
     LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
   } else {
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 936ff42..f9b9684 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -613,7 +613,7 @@
       LOG(FATAL) << "Unexpected branch kind " << opcode;
       UNREACHABLE();
   }
-  LIR* hop_target = NULL;
+  LIR* hop_target = nullptr;
   if (!unconditional) {
     hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
     LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
@@ -650,7 +650,7 @@
   LIR *lir;
   AssemblerStatus res = kSuccess;  // Assume success.
 
-  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
     if (lir->opcode < 0) {
       continue;
     }
@@ -668,7 +668,7 @@
          * (label2 - label1), where label1 is a standard
          * kPseudoTargetLabel and is stored in operands[2].
          * If operands[3] is null, then label2 is a kPseudoTargetLabel
-         * and is found in lir->target.  If operands[3] is non-NULL,
+         * and is found in lir->target.  If operands[3] is non-nullptr,
          * then it is a Switch/Data table.
          */
         int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
@@ -863,7 +863,7 @@
   LIR* lir;
   int offset = 0;
 
-  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
     lir->offset = offset;
     if (LIKELY(lir->opcode >= 0)) {
       if (!lir->flags.is_nop) {
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 05570e4..39b9cc7 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -112,7 +112,7 @@
   // Test loop.
   RegStorage r_key = AllocTemp();
   LIR* loop_label = NewLIR0(kPseudoTargetLabel);
-  LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
+  LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, nullptr);
   Load32Disp(r_base, 0, r_key);
   OpRegImm(kOpAdd, r_base, 8);
   OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
@@ -188,7 +188,7 @@
   tab_rec->anchor = base_label;
 
   // Bounds check - if < 0 or >= size continue following switch.
-  LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
+  LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, nullptr);
 
   // Materialize the table base pointer.
   RegStorage r_base = AllocPtrSizeTemp();
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 1ca8bb6..9319c64 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -68,7 +68,7 @@
     NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
     NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
     NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
-    LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
+    LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, nullptr);
     NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
     NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
     NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
@@ -128,7 +128,7 @@
       break;
     default:
       LOG(FATAL) << "No support for ConditionCode: " << cond;
-      return NULL;
+      return nullptr;
   }
   if (cmp_zero) {
     branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
@@ -278,7 +278,7 @@
   // Implement as a branch-over.
   // TODO: Conditional move?
   LoadConstant(rs_dest, true_val);
-  LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+  LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, nullptr);
   LoadConstant(rs_dest, false_val);
   LIR* target_label = NewLIR0(kPseudoTargetLabel);
   ne_branchover->target = target_label;
@@ -447,7 +447,7 @@
 // Test suspend flag, return target of taken suspend branch.
 LIR* MipsMir2Lir::OpTestSuspend(LIR* target) {
   OpRegImm(kOpSub, TargetPtrReg(kSuspend), 1);
-  return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
+  return OpCmpImmBranch((target == nullptr) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
 }
 
 // Decrement register and branch on condition.
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 8ab5422..95c61cd 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -566,7 +566,7 @@
 /* Load value from base + scaled index. */
 LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                   int scale, OpSize size) {
-  LIR *first = NULL;
+  LIR *first = nullptr;
   LIR *res;
   MipsOpCode opcode = kMipsNop;
   bool is64bit = cu_->target64 && r_dest.Is64Bit();
@@ -640,7 +640,7 @@
 // Store value to base + scaled index.
 LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                    int scale, OpSize size) {
-  LIR *first = NULL;
+  LIR *first = nullptr;
   MipsOpCode opcode = kMipsNop;
   RegStorage t_reg = AllocTemp();
 
@@ -696,8 +696,8 @@
  * rlp and then restore.
  */
   LIR *res;
-  LIR *load = NULL;
-  LIR *load2 = NULL;
+  LIR *load = nullptr;
+  LIR *load2 = nullptr;
   MipsOpCode opcode = kMipsNop;
   bool short_form = IS_SIMM16(displacement);
   bool is64bit = false;
@@ -857,8 +857,8 @@
 LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                     OpSize size) {
   LIR *res;
-  LIR *store = NULL;
-  LIR *store2 = NULL;
+  LIR *store = nullptr;
+  LIR *store2 = nullptr;
   MipsOpCode opcode = kMipsNop;
   bool short_form = IS_SIMM16(displacement);
   bool is64bit = false;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 961cd4f..e9e9161 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -104,19 +104,6 @@
   return res;
 }
 
-RegisterClass Mir2Lir::LocToRegClass(RegLocation loc) {
-  RegisterClass res;
-  if (loc.fp) {
-    DCHECK(!loc.ref) << "At most, one of ref/fp may be set";
-    res = kFPReg;
-  } else if (loc.ref) {
-    res = kRefReg;
-  } else {
-    res = kCoreReg;
-  }
-  return res;
-}
-
 void Mir2Lir::LockArg(size_t in_position) {
   RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);
 
@@ -560,25 +547,20 @@
       if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
         GenSuspendTest(opt_flags);
       }
-      DCHECK_EQ(LocToRegClass(rl_src[0]), ShortyToRegClass(cu_->shorty[0]));
-      StoreValue(GetReturn(LocToRegClass(rl_src[0])), rl_src[0]);
+      StoreValue(GetReturn(ShortyToRegClass(cu_->shorty[0])), rl_src[0]);
       break;
 
     case Instruction::RETURN_WIDE:
       if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
         GenSuspendTest(opt_flags);
       }
-      DCHECK_EQ(LocToRegClass(rl_src[0]), ShortyToRegClass(cu_->shorty[0]));
-      StoreValueWide(GetReturnWide(LocToRegClass(rl_src[0])), rl_src[0]);
-      break;
-
-    case Instruction::MOVE_RESULT_WIDE:
-      StoreValueWide(rl_dest, GetReturnWide(LocToRegClass(rl_dest)));
+      StoreValueWide(GetReturnWide(ShortyToRegClass(cu_->shorty[0])), rl_src[0]);
       break;
 
     case Instruction::MOVE_RESULT:
+    case Instruction::MOVE_RESULT_WIDE:
     case Instruction::MOVE_RESULT_OBJECT:
-      StoreValue(rl_dest, GetReturn(LocToRegClass(rl_dest)));
+      // Already processed with invoke or filled-new-array.
       break;
 
     case Instruction::MOVE:
@@ -1237,7 +1219,7 @@
   block_label_list_[block_id].flags.fixup = kFixupLabel;
   AppendLIR(&block_label_list_[block_id]);
 
-  LIR* head_lir = NULL;
+  LIR* head_lir = nullptr;
 
   // If this is a catch block, export the start address.
   if (bb->catch_entry) {
@@ -1263,7 +1245,7 @@
     DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
   }
 
-  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     ResetRegPool();
     if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
       ClobberAllTemps();
@@ -1287,7 +1269,7 @@
     GenPrintLabel(mir);
 
     // Remember the first LIR for this block.
-    if (head_lir == NULL) {
+    if (head_lir == nullptr) {
       head_lir = &block_label_list_[bb->id];
       // Set the first label as a scheduling barrier.
       DCHECK(!head_lir->flags.use_def_invalid);
@@ -1327,7 +1309,7 @@
   cu_->NewTimingSplit("SpecialMIR2LIR");
   // Find the first DalvikByteCode block.
   DCHECK_EQ(mir_graph_->GetNumReachableBlocks(), mir_graph_->GetDfsOrder().size());
-  BasicBlock*bb = NULL;
+  BasicBlock* bb = nullptr;
   for (BasicBlockId dfs_id : mir_graph_->GetDfsOrder()) {
     BasicBlock* candidate = mir_graph_->GetBasicBlock(dfs_id);
     if (candidate->block_type == kDalvikByteCode) {
@@ -1335,11 +1317,11 @@
       break;
     }
   }
-  if (bb == NULL) {
+  if (bb == nullptr) {
     return false;
   }
   DCHECK_EQ(bb->start_offset, 0);
-  DCHECK(bb->first_mir_insn != NULL);
+  DCHECK(bb->first_mir_insn != nullptr);
 
   // Get the first instruction.
   MIR* mir = bb->first_mir_insn;
@@ -1361,17 +1343,17 @@
   PreOrderDfsIterator iter(mir_graph_);
   BasicBlock* curr_bb = iter.Next();
   BasicBlock* next_bb = iter.Next();
-  while (curr_bb != NULL) {
+  while (curr_bb != nullptr) {
     MethodBlockCodeGen(curr_bb);
     // If the fall_through block is no longer laid out consecutively, drop in a branch.
     BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
-    if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
+    if ((curr_bb_fall_through != nullptr) && (curr_bb_fall_through != next_bb)) {
       OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
     }
     curr_bb = next_bb;
     do {
       next_bb = iter.Next();
-    } while ((next_bb != NULL) && (next_bb->block_type == kDead));
+    } while ((next_bb != nullptr) && (next_bb->block_type == kDead));
   }
   HandleSlowPaths();
 }
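
With LocToRegClass gone, RETURN/RETURN_WIDE take their register class from the method shorty, and the MOVE_RESULT* opcodes fall through to an empty case because the preceding invoke (or filled-new-array) has already stored the value. Illustrative dispatch shape:

    enum class Op { kInvoke, kMoveResult, kMoveResultWide, kMoveResultObject };

    void CompileInsn(Op op) {
      switch (op) {
        case Op::kInvoke:
          // GenInvoke(...) writes the call result into the destination of a
          // following MOVE_RESULT*, if any.
          break;
        case Op::kMoveResult:
        case Op::kMoveResultWide:
        case Op::kMoveResultObject:
          break;  // Already processed with the invoke.
      }
    }
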
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index db59714..8f08a51 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -388,7 +388,7 @@
       LIR* DefEnd() { return def_end_; }
       void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
       void ResetDefBody() { def_start_ = def_end_ = nullptr; }
-      // Find member of aliased set matching storage_used; return nullptr if none.
+      // Find member of aliased set matching storage_used; return null if none.
       RegisterInfo* FindMatchingView(uint32_t storage_used) {
         RegisterInfo* res = Master();
         for (; res != nullptr; res = res->GetAliasChain()) {
@@ -605,7 +605,7 @@
     char* ArenaStrdup(const char* str) {
       size_t len = strlen(str) + 1;
       char* res = arena_->AllocArray<char>(len, kArenaAllocMisc);
-      if (res != NULL) {
+      if (res != nullptr) {
         strncpy(res, str, len);
       }
       return res;
@@ -634,7 +634,6 @@
     }
 
     RegisterClass ShortyToRegClass(char shorty_type);
-    RegisterClass LocToRegClass(RegLocation loc);
     int ComputeFrameSize();
     void Materialize();
     virtual CompiledMethod* GetCompiledMethod();
@@ -651,7 +650,7 @@
     void DumpPromotionMap();
     void CodegenDump();
     LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
-                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
+                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = nullptr);
     LIR* NewLIR0(int opcode);
     LIR* NewLIR1(int opcode, int dest);
     LIR* NewLIR2(int opcode, int dest, int src1);
@@ -846,7 +845,8 @@
                           RegLocation rl_src, int lit);
     virtual void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2, int flags);
-    void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src);
+    void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
+                           RegisterClass return_reg_class);
     void GenSuspendTest(int opt_flags);
     void GenSuspendTestAndBranch(int opt_flags, LIR* target);
 
@@ -954,7 +954,7 @@
     virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
     bool GenInlinedStringCompareTo(CallInfo* info);
     virtual bool GenInlinedCurrentThread(CallInfo* info);
-    bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
+    bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_object, bool is_volatile);
     bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
                              bool is_volatile, bool is_ordered);
 
@@ -1120,8 +1120,8 @@
      * @param base_reg The register holding the base address.
      * @param offset The offset from the base.
      * @param check_value The immediate to compare to.
-     * @param target branch target (or nullptr)
-     * @param compare output for getting LIR for comparison (or nullptr)
+     * @param target branch target (or null)
+     * @param compare output for getting LIR for comparison (or null)
      * @returns The branch instruction that was generated.
      */
     virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
@@ -1854,7 +1854,7 @@
     // to deduplicate the masks.
     ResourceMaskCache mask_cache_;
 
-    // Record the MIR that generated a given safepoint (nullptr for prologue safepoints).
+    // Record the MIR that generated a given safepoint (null for prologue safepoints).
     ArenaVector<std::pair<LIR*, MIR*>> safepoints_;
 
     // The layout of the cu_->dex_file's dex cache arrays for PC-relative addressing.
@@ -1869,7 +1869,7 @@
     // For architectures that don't have true PC-relative addressing (see pc_rel_temp_
     // above) and also have a limited range of offsets for loads, it'd be useful to
     // know the minimum offset into the dex cache arrays, so we calculate that as well
-    // if pc_rel_temp_ isn't nullptr.
+    // if pc_rel_temp_ isn't null.
     uint32_t dex_cache_arrays_min_offset_;
 
     dwarf::LazyDebugFrameOpCodeWriter cfi_;
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index 555d5b9..b3c7355 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -100,7 +100,7 @@
       }
     }
     m2l->AdjustSpillMask();
-    m2l->GenEntrySequence(NULL, m2l->LocCReturnRef());
+    m2l->GenEntrySequence(nullptr, m2l->LocCReturnRef());
     m2l->GenExitSequence();
     m2l->HandleSlowPaths();
     m2l->AssembleLIR();
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index fc3e687..39eb117 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -102,7 +102,7 @@
 static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
               "kDisabledOpts unexpected");
 
-// Supported shorty types per instruction set. nullptr means that all are available.
+// Supported shorty types per instruction set. null means that all are available.
 // Z : boolean
 // B : byte
 // S : short
@@ -422,7 +422,7 @@
     Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
 };
 
-// Unsupported opcodes. nullptr can be used when everything is supported. Size of the lists is
+// Unsupported opcodes. null can be used when everything is supported. Size of the lists is
 // recorded below.
 static const int* kUnsupportedOpcodes[] = {
     // 0 = kNone.
@@ -515,7 +515,7 @@
 
   for (unsigned int idx = 0; idx < cu->mir_graph->GetNumBlocks(); idx++) {
     BasicBlock* bb = cu->mir_graph->GetBasicBlock(idx);
-    if (bb == NULL) continue;
+    if (bb == nullptr) continue;
     if (bb->block_type == kDead) continue;
     for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
       int opcode = mir->dalvikInsn.opcode;
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index e779479..8ec86fa 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -935,7 +935,7 @@
       RegStorage my_reg = info->GetReg();
       RegStorage partner_reg = info->Partner();
       RegisterInfo* partner = GetRegInfo(partner_reg);
-      DCHECK(partner != NULL);
+      DCHECK(partner != nullptr);
       DCHECK(partner->IsWide());
       DCHECK_EQ(my_reg.GetReg(), partner->Partner().GetReg());
       DCHECK(partner->IsLive());
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index af19f5e..eb33357 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1633,7 +1633,7 @@
   AssemblerStatus res = kSuccess;  // Assume success
 
   const bool kVerbosePcFixup = false;
-  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
     if (IsPseudoLirOp(lir->opcode)) {
       continue;
     }
@@ -1646,7 +1646,7 @@
       switch (lir->opcode) {
         case kX86Jcc8: {
           LIR *target_lir = lir->target;
-          DCHECK(target_lir != NULL);
+          DCHECK(target_lir != nullptr);
           int delta = 0;
           CodeOffset pc;
           if (IS_SIMM8(lir->operands[0])) {
@@ -1679,7 +1679,7 @@
         }
         case kX86Jcc32: {
           LIR *target_lir = lir->target;
-          DCHECK(target_lir != NULL);
+          DCHECK(target_lir != nullptr);
           CodeOffset pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
           CodeOffset target = target_lir->offset;
           int delta = target - pc;
@@ -1695,7 +1695,7 @@
         }
         case kX86Jecxz8: {
           LIR *target_lir = lir->target;
-          DCHECK(target_lir != NULL);
+          DCHECK(target_lir != nullptr);
           CodeOffset pc;
           pc = lir->offset + 2;  // opcode + rel8
           CodeOffset target = target_lir->offset;
@@ -1706,7 +1706,7 @@
         }
         case kX86Jmp8: {
           LIR *target_lir = lir->target;
-          DCHECK(target_lir != NULL);
+          DCHECK(target_lir != nullptr);
           int delta = 0;
           CodeOffset pc;
           if (IS_SIMM8(lir->operands[0])) {
@@ -1738,7 +1738,7 @@
         }
         case kX86Jmp32: {
           LIR *target_lir = lir->target;
-          DCHECK(target_lir != NULL);
+          DCHECK(target_lir != nullptr);
           CodeOffset pc = lir->offset + 5 /* opcode + rel32 */;
           CodeOffset target = target_lir->offset;
           int delta = target - pc;
@@ -1748,7 +1748,7 @@
         default:
           if (lir->flags.fixup == kFixupLoad) {
             LIR *target_lir = lir->target;
-            DCHECK(target_lir != NULL);
+            DCHECK(target_lir != nullptr);
             CodeOffset target = target_lir->offset;
             // Handle 64 bit RIP addressing.
             if (lir->operands[1] == kRIPReg) {
@@ -1950,7 +1950,7 @@
   LIR* lir;
   int offset = 0;
 
-  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
     lir->offset = offset;
     if (LIKELY(!IsPseudoLirOp(lir->opcode))) {
       if (!lir->flags.is_nop) {
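
All the Jcc/Jmp fixups above compute the same quantity: a signed displacement relative to the end of the branch instruction. A small helper shown for the Jcc32 case (sizes as in the hunk, 2-byte opcode plus 4-byte rel32):

    #include <cstdint>

    int32_t Rel32ForJcc32(uint32_t insn_offset, uint32_t target_offset) {
      uint32_t pc = insn_offset + 6;  // 2-byte opcode + 4-byte rel32.
      return static_cast<int32_t>(target_offset - pc);
    }
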
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index d7a5eb0..e2364d8 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -80,7 +80,7 @@
 
   // Bounds check - if < 0 or >= size continue following switch
   OpRegImm(kOpCmp, keyReg, size - 1);
-  LIR* branch_over = OpCondBranch(kCondHi, NULL);
+  LIR* branch_over = OpCondBranch(kCondHi, nullptr);
 
   RegStorage addr_for_jump;
   if (cu_->target64) {
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index cfe0480..8e81746 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -309,7 +309,8 @@
         branch_normal->target = NewLIR0(kPseudoTargetLabel);
         StoreValueWide(rl_dest, rl_result);
       } else {
-        GenConversionCall(kQuickF2l, rl_dest, rl_src);
+        CheckEntrypointTypes<kQuickF2l, int64_t, float>();  // int64_t -> kCoreReg
+        GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
       }
       return;
     case Instruction::DOUBLE_TO_LONG:
@@ -334,7 +335,8 @@
         branch_normal->target = NewLIR0(kPseudoTargetLabel);
         StoreValueWide(rl_dest, rl_result);
       } else {
-        GenConversionCall(kQuickD2l, rl_dest, rl_src);
+        CheckEntrypointTypes<kQuickD2l, int64_t, double>();  // int64_t -> kCoreReg
+        GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
       }
       return;
     default:
@@ -482,13 +484,13 @@
   } else {
     NewLIR2(kX86UcomisdRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   }
-  LIR* branch = NULL;
+  LIR* branch = nullptr;
   if (unordered_gt) {
     branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
   }
   // If the result reg can't be byte accessed, use a jump and move instead of a set.
   if (!IsByteRegister(rl_result.reg)) {
-    LIR* branch2 = NULL;
+    LIR* branch2 = nullptr;
     if (unordered_gt) {
       branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
       NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
@@ -511,7 +513,7 @@
                                      bool is_double) {
   LIR* taken = &block_label_list_[bb->taken];
   LIR* not_taken = &block_label_list_[bb->fall_through];
-  LIR* branch = NULL;
+  LIR* branch = nullptr;
   RegLocation rl_src1;
   RegLocation rl_src2;
   if (is_double) {
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 1043815..943bfc0 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1229,7 +1229,7 @@
     LockTemp(rs_r0);
 
     RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
-    RegLocation rl_new_value = LoadValue(rl_src_new_value, LocToRegClass(rl_src_new_value));
+    RegLocation rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
 
     if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
       // Mark card for object assuming new value is stored.
@@ -1569,7 +1569,7 @@
   } else {
     OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
   }
-  return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
+  return OpCondBranch((target == nullptr) ? kCondNe : kCondEq, target);
 }
 
 // Decrement register and branch on condition
@@ -3005,7 +3005,7 @@
 
   // Assume that there is no match.
   LoadConstant(result_reg, 0);
-  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
 
   // We will use this register to compare to memory below.
   // References are 32 bit in memory, and 64 bit in registers (in 64 bit mode).
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index a16e242..b460379 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1281,7 +1281,7 @@
   RegLocation rl_return = GetReturn(kCoreReg);
   RegLocation rl_dest = InlineTarget(info);
 
-  // Is the string non-NULL?
+  // Is the string non-null?
   LoadValueDirectFixed(rl_obj, rs_rDX);
   GenNullCheck(rs_rDX, info->opt_flags);
   info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index efcb9ee..61a1bec 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -578,7 +578,7 @@
       } else if (pc_rel_base_reg_.Valid() || cu_->target64) {
         // We will load the value from the literal area.
         LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-        if (data_target == NULL) {
+        if (data_target == nullptr) {
           data_target = AddWideData(&literal_list_, val_lo, val_hi);
         }
 
@@ -642,8 +642,8 @@
 
 LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                      int displacement, RegStorage r_dest, OpSize size) {
-  LIR *load = NULL;
-  LIR *load2 = NULL;
+  LIR *load = nullptr;
+  LIR *load2 = nullptr;
   bool is_array = r_index.Valid();
   bool pair = r_dest.IsPair();
   bool is64bit = ((size == k64) || (size == kDouble));
@@ -763,7 +763,7 @@
     }
   }
 
-  // Always return first load generated as this might cause a fault if base is nullptr.
+  // Always return first load generated as this might cause a fault if base is null.
   return load;
 }
 
@@ -791,8 +791,8 @@
 LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                       int displacement, RegStorage r_src, OpSize size,
                                       int opt_flags) {
-  LIR *store = NULL;
-  LIR *store2 = NULL;
+  LIR *store = nullptr;
+  LIR *store2 = nullptr;
   bool is_array = r_index.Valid();
   bool pair = r_src.IsPair();
   bool is64bit = (size == k64) || (size == kDouble);
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 197f66d..939bf40 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -26,15 +26,15 @@
 
 void MIRGraph::ClearAllVisitedFlags() {
   AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
     bb->visited = false;
   }
 }
 
 BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
-  if (bb != NULL) {
+  if (bb != nullptr) {
     if (bb->visited || bb->hidden) {
-      bb = NULL;
+      bb = nullptr;
     }
   }
   return bb;
@@ -42,13 +42,13 @@
 
 BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
   BasicBlock* res = NeedsVisit(GetBasicBlock(bb->fall_through));
-  if (res == NULL) {
+  if (res == nullptr) {
     res = NeedsVisit(GetBasicBlock(bb->taken));
-    if (res == NULL) {
+    if (res == nullptr) {
       if (bb->successor_block_list_type != kNotUsed) {
         for (SuccessorBlockInfo* sbi : bb->successor_blocks) {
           res = NeedsVisit(GetBasicBlock(sbi->block));
-          if (res != NULL) {
+          if (res != nullptr) {
             break;
           }
         }
@@ -75,7 +75,7 @@
   while (!succ.empty()) {
     BasicBlock* curr = succ.back();
     BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
-    if (next_successor != NULL) {
+    if (next_successor != nullptr) {
       MarkPreOrder(next_successor);
       succ.push_back(next_successor);
       continue;
@@ -107,7 +107,7 @@
   if (num_reachable_blocks_ != GetNumBlocks()) {
     // Kill all unreachable blocks.
     AllNodesIterator iter(this);
-    for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
       if (!bb->visited) {
         bb->Kill(this);
       }
@@ -121,7 +121,7 @@
  * register idx is defined in BasicBlock bb.
  */
 bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
-  if (bb->data_flow_info == NULL) {
+  if (bb->data_flow_info == nullptr) {
     return false;
   }
 
@@ -149,11 +149,11 @@
   }
 
   AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
     FindLocalLiveIn(bb);
   }
   AllNodesIterator iter2(this);
-  for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+  for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
     FillDefBlockMatrix(bb);
   }
 
@@ -247,7 +247,7 @@
 void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
   int num_total_blocks = GetBasicBlockListCount();
 
-  if (bb->dominators == NULL) {
+  if (bb->dominators == nullptr) {
     bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
                                                  true /* expandable */, kBitMapDominators);
     bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
@@ -357,7 +357,7 @@
 
   /* Initialize domination-related data structures */
   PreOrderDfsIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
     InitializeDominationInfo(bb);
   }
 
@@ -376,7 +376,7 @@
   /* Compute the immediate dominators */
   RepeatingReversePostOrderDfsIterator iter2(this);
   bool change = false;
-  for (BasicBlock* bb = iter2.Next(false); bb != NULL; bb = iter2.Next(change)) {
+  for (BasicBlock* bb = iter2.Next(false); bb != nullptr; bb = iter2.Next(change)) {
     change = ComputeblockIDom(bb);
   }
 
@@ -387,19 +387,19 @@
   GetEntryBlock()->i_dom = 0;
 
   PreOrderDfsIterator iter3(this);
-  for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
+  for (BasicBlock* bb = iter3.Next(); bb != nullptr; bb = iter3.Next()) {
     SetDominators(bb);
   }
 
   ReversePostOrderDfsIterator iter4(this);
-  for (BasicBlock* bb = iter4.Next(); bb != NULL; bb = iter4.Next()) {
+  for (BasicBlock* bb = iter4.Next(); bb != nullptr; bb = iter4.Next()) {
     ComputeBlockDominators(bb);
   }
 
   // Compute the dominance frontier for each block.
   ComputeDomPostOrderTraversal(GetEntryBlock());
   PostOrderDOMIterator iter5(this);
-  for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
+  for (BasicBlock* bb = iter5.Next(); bb != nullptr; bb = iter5.Next()) {
     ComputeDominanceFrontier(bb);
   }
 
@@ -434,7 +434,7 @@
   DCHECK_EQ(temp_.ssa.num_vregs, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
   ArenaBitVector* temp_live_vregs = temp_.ssa.work_live_vregs;
 
-  if (bb->data_flow_info == NULL) {
+  if (bb->data_flow_info == nullptr) {
     return false;
   }
   temp_live_vregs->Copy(bb->data_flow_info->live_in_v);
@@ -466,7 +466,7 @@
 void MIRGraph::FindPhiNodeBlocks() {
   RepeatingPostOrderDfsIterator iter(this);
   bool change = false;
-  for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
+  for (BasicBlock* bb = iter.Next(false); bb != nullptr; bb = iter.Next(change)) {
     change = ComputeBlockLiveIns(bb);
   }
 
@@ -505,7 +505,7 @@
  */
 bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
   /* Phi nodes are at the beginning of each block */
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi))
       return true;
     int ssa_reg = mir->ssa_rep->defs[0];
diff --git a/compiler/dex/type_inference.cc b/compiler/dex/type_inference.cc
new file mode 100644
index 0000000..84cd69a
--- /dev/null
+++ b/compiler/dex/type_inference.cc
@@ -0,0 +1,1064 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "type_inference.h"
+
+#include "base/bit_vector-inl.h"
+#include "compiler_ir.h"
+#include "dataflow_iterator-inl.h"
+#include "dex_flags.h"
+#include "dex_file-inl.h"
+#include "driver/dex_compilation_unit.h"
+#include "mir_field_info.h"
+#include "mir_graph.h"
+#include "mir_method_info.h"
+
+namespace art {
+
+inline TypeInference::Type TypeInference::Type::ArrayType(uint32_t array_depth, Type nested_type) {
+  DCHECK_NE(array_depth, 0u);
+  return Type(kFlagNarrow | kFlagRef | kFlagLowWord | (array_depth << kBitArrayDepthStart) |
+              ((nested_type.raw_bits_ & kMaskWideAndType) << kArrayTypeShift));
+}
+
+inline TypeInference::Type TypeInference::Type::ArrayTypeFromComponent(Type component_type) {
+  if (component_type.ArrayDepth() == 0u) {
+    return ArrayType(1u, component_type);
+  }
+  if (UNLIKELY(component_type.ArrayDepth() == kMaxArrayDepth)) {
+    return component_type;
+  }
+  return Type(component_type.raw_bits_ + (1u << kBitArrayDepthStart));  // array_depth + 1u;
+}
+
+TypeInference::Type TypeInference::Type::ShortyType(char shorty) {
+  switch (shorty) {
+    case 'L':
+      return Type(kFlagLowWord | kFlagNarrow | kFlagRef);
+    case 'D':
+      return Type(kFlagLowWord | kFlagWide | kFlagFp);
+    case 'J':
+      return Type(kFlagLowWord | kFlagWide | kFlagCore);
+    case 'F':
+      return Type(kFlagLowWord | kFlagNarrow | kFlagFp);
+    default:
+      DCHECK(shorty == 'I' || shorty == 'S' || shorty == 'C' || shorty == 'B' || shorty == 'Z');
+      return Type(kFlagLowWord | kFlagNarrow | kFlagCore);
+  }
+}
+
+TypeInference::Type TypeInference::Type::DexType(const DexFile* dex_file, uint32_t type_idx) {
+  const char* desc = dex_file->GetTypeDescriptor(dex_file->GetTypeId(type_idx));
+  if (UNLIKELY(desc[0] == 'V')) {
+    return Unknown();
+  } else if (UNLIKELY(desc[0] == '[')) {
+    size_t array_depth = 0u;
+    while (*desc == '[') {
+      ++array_depth;
+      ++desc;
+    }
+    if (UNLIKELY(array_depth > kMaxArrayDepth)) {
+      LOG(WARNING) << "Array depth exceeds " << kMaxArrayDepth << ": " << array_depth
+          << " in dex file " << dex_file->GetLocation() << " type index " << type_idx;
+      array_depth = kMaxArrayDepth;
+    }
+    Type shorty_result = Type::ShortyType(desc[0]);
+    return ArrayType(array_depth, shorty_result);
+  } else {
+    return ShortyType(desc[0]);
+  }
+}
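+// For example, the descriptor "[[F" yields a type with array_depth == 2 and a
+// narrow fp nested type, while "Ljava/lang/String;" yields a plain narrow
+// reference type (the precise class is deliberately not recorded).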
+
+bool TypeInference::Type::MergeArrayConflict(Type src_type) {
+  DCHECK(Ref());
+  DCHECK_NE(ArrayDepth(), src_type.ArrayDepth());
+  DCHECK_GE(std::min(ArrayDepth(), src_type.ArrayDepth()), 1u);
+  bool size_conflict =
+      (ArrayDepth() == 1u && (raw_bits_ & kFlagArrayWide) != 0u) ||
+      (src_type.ArrayDepth() == 1u && (src_type.raw_bits_ & kFlagArrayWide) != 0u);
+  // Mark all three array type bits so that merging any other type bits will not change this type.
+  return Copy(Type((raw_bits_ & kMaskNonArray) |
+                   (1u << kBitArrayDepthStart) | kFlagArrayCore | kFlagArrayRef | kFlagArrayFp |
+                   kFlagArrayNarrow | (size_conflict ? kFlagArrayWide : 0u)));
+}
+
+bool TypeInference::Type::MergeStrong(Type src_type) {
+  bool changed = MergeNonArrayFlags(src_type);
+  if (src_type.ArrayDepth() != 0u) {
+    if (ArrayDepth() == 0u) {
+      DCHECK_EQ(raw_bits_ & ~kMaskNonArray, 0u);
+      DCHECK_NE(src_type.raw_bits_ & kFlagRef, 0u);
+      raw_bits_ |= src_type.raw_bits_ & (~kMaskNonArray | kFlagRef);
+      changed = true;
+    } else if (ArrayDepth() == src_type.ArrayDepth()) {
+      changed |= MergeBits(src_type, kMaskArrayWideAndType);
+    } else if (src_type.ArrayDepth() == 1u &&
+        (((src_type.raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+         ((src_type.raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+      // Source type is [L or [?, but the current type is at least [[; preserve it.
+    } else if (ArrayDepth() == 1u &&
+        (((raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+         ((raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+      // Overwrite [? or [L with the source array type, which is at least [[.
+      raw_bits_ = (raw_bits_ & kMaskNonArray) | (src_type.raw_bits_ & ~kMaskNonArray);
+      changed = true;
+    } else {
+      // Mark the array value type with a conflict: both ref and fp.
+      changed |= MergeArrayConflict(src_type);
+    }
+  }
+  return changed;
+}
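+// Worked example for the array cases above: merging a src_type [F into a
+// current [? of equal depth takes the MergeBits() branch and adds the narrow
+// fp element flags, yielding [F; merging a src_type [L into a current [[I
+// takes the "preserve it" branch, since [[I is more precise than [L.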
+
+bool TypeInference::Type::MergeWeak(Type src_type) {
+  bool changed = MergeNonArrayFlags(src_type);
+  if (src_type.ArrayDepth() != 0u && src_type.NonNull()) {
+    DCHECK_NE(src_type.ArrayDepth(), 0u);
+    if (ArrayDepth() == 0u) {
+      DCHECK_EQ(raw_bits_ & ~kMaskNonArray, 0u);
+      // Preserve current type.
+    } else if (ArrayDepth() == src_type.ArrayDepth()) {
+      changed |= MergeBits(src_type, kMaskArrayWideAndType);
+    } else if (src_type.ArrayDepth() == 1u &&
+        (((src_type.raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+         ((src_type.raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+      // Source type is [L or [?, but the current type is at least [[; preserve it.
+    } else if (ArrayDepth() == 1u &&
+        (((raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+         ((raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+      // We have [? or [L. If it's [?, upgrade to [L as the source array type is at least [[.
+      changed |= MergeBits(ObjectArrayType(), kMaskArrayWideAndType);
+    } else {
+      // Mark the array value type with a conflict: both ref and fp.
+      changed |= MergeArrayConflict(src_type);
+    }
+  }
+  return changed;
+}
+
+TypeInference::CheckCastData::CheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc)
+    : mir_graph_(mir_graph),
+      alloc_(alloc),
+      num_blocks_(mir_graph->GetNumBlocks()),
+      num_sregs_(mir_graph->GetNumSSARegs()),
+      check_cast_map_(std::less<MIR*>(), alloc->Adapter()),
+      split_sreg_data_(std::less<int32_t>(), alloc->Adapter()) {
+}
+
+void TypeInference::CheckCastData::AddCheckCast(MIR* check_cast, Type type) {
+  DCHECK_EQ(check_cast->dalvikInsn.opcode, Instruction::CHECK_CAST);
+  type.CheckPureRef();
+  int32_t extra_s_reg = static_cast<int32_t>(num_sregs_);
+  num_sregs_ += 1;
+  check_cast_map_.Put(check_cast, CheckCastMapValue{extra_s_reg, type});  // NOLINT
+  int32_t s_reg = check_cast->ssa_rep->uses[0];
+  auto lb = split_sreg_data_.lower_bound(s_reg);
+  if (lb == split_sreg_data_.end() || split_sreg_data_.key_comp()(s_reg, lb->first)) {
+    SplitSRegData split_s_reg_data = {
+        0,
+        alloc_->AllocArray<int32_t>(num_blocks_, kArenaAllocMisc),
+        alloc_->AllocArray<int32_t>(num_blocks_, kArenaAllocMisc),
+        new (alloc_) ArenaBitVector(alloc_, num_blocks_, false)
+    };
+    std::fill_n(split_s_reg_data.starting_mod_s_reg, num_blocks_, INVALID_SREG);
+    std::fill_n(split_s_reg_data.ending_mod_s_reg, num_blocks_, INVALID_SREG);
+    split_s_reg_data.def_phi_blocks_->ClearAllBits();
+    BasicBlock* def_bb = FindDefBlock(check_cast);
+    split_s_reg_data.ending_mod_s_reg[def_bb->id] = s_reg;
+    split_s_reg_data.def_phi_blocks_->SetBit(def_bb->id);
+    lb = split_sreg_data_.PutBefore(lb, s_reg, split_s_reg_data);
+  }
+  lb->second.ending_mod_s_reg[check_cast->bb] = extra_s_reg;
+  lb->second.def_phi_blocks_->SetBit(check_cast->bb);
+}
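+// Rough example: for "check-cast v0, LFoo;" where v0 holds s_reg 5, this
+// allocates a fresh modified sreg typed LFoo, records it as the check-cast
+// block's ending_mod_s_reg, and later uses of s_reg 5 resolve to it through
+// ModifiedSReg() as the blocks are traversed.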
+
+void TypeInference::CheckCastData::AddPseudoPhis() {
+  // Look for pseudo-phis where a split SSA reg merges with a differently typed version
+  // and initialize all starting_mod_s_reg.
+  DCHECK(!split_sreg_data_.empty());
+  ArenaBitVector* phi_blocks = new (alloc_) ArenaBitVector(alloc_, num_blocks_, false);
+
+  for (auto& entry : split_sreg_data_) {
+    SplitSRegData& data = entry.second;
+
+    // Find pseudo-phi nodes.
+    phi_blocks->ClearAllBits();
+    ArenaBitVector* input_blocks = data.def_phi_blocks_;
+    do {
+      for (uint32_t idx : input_blocks->Indexes()) {
+        BasicBlock* def_bb = mir_graph_->GetBasicBlock(idx);
+        if (def_bb->dom_frontier != nullptr) {
+          phi_blocks->Union(def_bb->dom_frontier);
+        }
+      }
+    } while (input_blocks->Union(phi_blocks));
+
+    // Find live pseudo-phis. Make sure they're merging the same SSA reg.
+    data.def_phi_blocks_->ClearAllBits();
+    int32_t s_reg = entry.first;
+    int v_reg = mir_graph_->SRegToVReg(s_reg);
+    for (uint32_t phi_bb_id : phi_blocks->Indexes()) {
+      BasicBlock* phi_bb = mir_graph_->GetBasicBlock(phi_bb_id);
+      DCHECK(phi_bb != nullptr);
+      DCHECK(phi_bb->data_flow_info != nullptr);
+      DCHECK(phi_bb->data_flow_info->live_in_v != nullptr);
+      if (IsSRegLiveAtStart(phi_bb, v_reg, s_reg)) {
+        int32_t extra_s_reg = static_cast<int32_t>(num_sregs_);
+        num_sregs_ += 1;
+        data.starting_mod_s_reg[phi_bb_id] = extra_s_reg;
+        data.def_phi_blocks_->SetBit(phi_bb_id);
+      }
+    }
+
+    // SSA rename for s_reg.
+    TopologicalSortIterator iter(mir_graph_);
+    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+      if (bb->data_flow_info == nullptr || bb->block_type == kEntryBlock) {
+        continue;
+      }
+      BasicBlockId bb_id = bb->id;
+      if (data.def_phi_blocks_->IsBitSet(bb_id)) {
+        DCHECK_NE(data.starting_mod_s_reg[bb_id], INVALID_SREG);
+      } else {
+        DCHECK_EQ(data.starting_mod_s_reg[bb_id], INVALID_SREG);
+        if (IsSRegLiveAtStart(bb, v_reg, s_reg)) {
+          // The earliest predecessor must have been processed already.
+          BasicBlock* pred_bb = FindTopologicallyEarliestPredecessor(bb);
+          int32_t mod_s_reg = data.ending_mod_s_reg[pred_bb->id];
+          data.starting_mod_s_reg[bb_id] = (mod_s_reg != INVALID_SREG) ? mod_s_reg : s_reg;
+        } else if (data.ending_mod_s_reg[bb_id] != INVALID_SREG) {
+          // Start the original defining block with s_reg.
+          data.starting_mod_s_reg[bb_id] = s_reg;
+        }
+      }
+      if (data.ending_mod_s_reg[bb_id] == INVALID_SREG) {
+        // If the block doesn't define the modified SSA reg, it propagates the starting type.
+        data.ending_mod_s_reg[bb_id] = data.starting_mod_s_reg[bb_id];
+      }
+    }
+  }
+}
+
+void TypeInference::CheckCastData::InitializeCheckCastSRegs(Type* sregs) const {
+  for (const auto& entry : check_cast_map_) {
+    DCHECK_LT(static_cast<size_t>(entry.second.modified_s_reg), num_sregs_);
+    sregs[entry.second.modified_s_reg] = entry.second.type.AsNonNull();
+  }
+}
+
+void TypeInference::CheckCastData::MergeCheckCastConflicts(Type* sregs) const {
+  for (const auto& entry : check_cast_map_) {
+    DCHECK_LT(static_cast<size_t>(entry.second.modified_s_reg), num_sregs_);
+    sregs[entry.first->ssa_rep->uses[0]].MergeNonArrayFlags(
+        sregs[entry.second.modified_s_reg].AsNull());
+  }
+}
+
+void TypeInference::CheckCastData::MarkPseudoPhiBlocks(uint64_t* bb_df_attrs) const {
+  for (auto& entry : split_sreg_data_) {
+    for (uint32_t bb_id : entry.second.def_phi_blocks_->Indexes()) {
+      bb_df_attrs[bb_id] |= DF_NULL_TRANSFER_N;
+    }
+  }
+}
+
+void TypeInference::CheckCastData::Start(BasicBlock* bb) {
+  for (auto& entry : split_sreg_data_) {
+    entry.second.current_mod_s_reg = entry.second.starting_mod_s_reg[bb->id];
+  }
+}
+
+bool TypeInference::CheckCastData::ProcessPseudoPhis(BasicBlock* bb, Type* sregs) {
+  bool changed = false;
+  for (auto& entry : split_sreg_data_) {
+    DCHECK_EQ(entry.second.current_mod_s_reg, entry.second.starting_mod_s_reg[bb->id]);
+    if (entry.second.def_phi_blocks_->IsBitSet(bb->id)) {
+      int32_t* ending_mod_s_reg = entry.second.ending_mod_s_reg;
+      Type merged_type = sregs[entry.second.current_mod_s_reg];
+      for (BasicBlockId pred_id : bb->predecessors) {
+        DCHECK_LT(static_cast<size_t>(ending_mod_s_reg[pred_id]), num_sregs_);
+        merged_type.MergeWeak(sregs[ending_mod_s_reg[pred_id]]);
+      }
+      if (UNLIKELY(!merged_type.IsDefined())) {
+        // This can happen during an initial merge of a loop head if the original def is
+        // actually an untyped null. (All other definitions are typed using the check-cast.)
+      } else if (merged_type.Wide()) {
+        // Ignore the pseudo-phi, just remember that there's a size mismatch.
+        sregs[entry.second.current_mod_s_reg].MarkSizeConflict();
+      } else {
+        DCHECK(merged_type.Narrow() && merged_type.LowWord() && !merged_type.HighWord());
+        // Propagate both down (fully) and up (without the "non-null" flag).
+        changed |= sregs[entry.second.current_mod_s_reg].Copy(merged_type);
+        merged_type = merged_type.AsNull();
+        for (BasicBlockId pred_id : bb->predecessors) {
+          DCHECK_LT(static_cast<size_t>(ending_mod_s_reg[pred_id]), num_sregs_);
+          sregs[ending_mod_s_reg[pred_id]].MergeStrong(merged_type);
+        }
+      }
+    }
+  }
+  return changed;
+}
+
+void TypeInference::CheckCastData::ProcessCheckCast(MIR* mir) {
+  auto mir_it = check_cast_map_.find(mir);
+  DCHECK(mir_it != check_cast_map_.end());
+  auto sreg_it = split_sreg_data_.find(mir->ssa_rep->uses[0]);
+  DCHECK(sreg_it != split_sreg_data_.end());
+  sreg_it->second.current_mod_s_reg = mir_it->second.modified_s_reg;
+}
+
+TypeInference::SplitSRegData* TypeInference::CheckCastData::GetSplitSRegData(int32_t s_reg) {
+  auto it = split_sreg_data_.find(s_reg);
+  return (it == split_sreg_data_.end()) ? nullptr : &it->second;
+}
+
+BasicBlock* TypeInference::CheckCastData::FindDefBlock(MIR* check_cast) {
+  // Find the initial definition of the SSA reg used by the check-cast.
+  DCHECK_EQ(check_cast->dalvikInsn.opcode, Instruction::CHECK_CAST);
+  int32_t s_reg = check_cast->ssa_rep->uses[0];
+  if (mir_graph_->IsInVReg(s_reg)) {
+    return mir_graph_->GetEntryBlock();
+  }
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  BasicBlock* bb = mir_graph_->GetBasicBlock(check_cast->bb);
+  DCHECK(bb != nullptr);
+  while (true) {
+    // Find the earliest predecessor in the topological sort order to ensure that
+    // the walk over predecessors cannot cycle.
+    BasicBlock* pred_bb = FindTopologicallyEarliestPredecessor(bb);
+    DCHECK(pred_bb != nullptr);
+    DCHECK(pred_bb->data_flow_info != nullptr);
+    DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+    if (pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] != s_reg) {
+      // The s_reg was not valid at the end of pred_bb, so it must have been defined in bb.
+      return bb;
+    }
+    bb = pred_bb;
+  }
+}
+
+BasicBlock* TypeInference::CheckCastData::FindTopologicallyEarliestPredecessor(BasicBlock* bb) {
+  DCHECK(!bb->predecessors.empty());
+  const auto& indexes = mir_graph_->GetTopologicalSortOrderIndexes();
+  DCHECK_LT(bb->id, indexes.size());
+  size_t best_idx = indexes[bb->id];
+  BasicBlockId best_id = NullBasicBlockId;
+  for (BasicBlockId pred_id : bb->predecessors) {
+    DCHECK_LT(pred_id, indexes.size());
+    if (best_idx > indexes[pred_id]) {
+      best_idx = indexes[pred_id];
+      best_id = pred_id;
+    }
+  }
+  // There must be at least one predecessor earlier than the bb.
+  DCHECK_LT(best_idx, indexes[bb->id]);
+  return mir_graph_->GetBasicBlock(best_id);
+}
+
+bool TypeInference::CheckCastData::IsSRegLiveAtStart(BasicBlock* bb, int v_reg, int32_t s_reg) {
+  DCHECK_EQ(v_reg, mir_graph_->SRegToVReg(s_reg));
+  DCHECK(bb != nullptr);
+  DCHECK(bb->data_flow_info != nullptr);
+  DCHECK(bb->data_flow_info->live_in_v != nullptr);
+  if (!bb->data_flow_info->live_in_v->IsBitSet(v_reg)) {
+    return false;
+  }
+  for (BasicBlockId pred_id : bb->predecessors) {
+    BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
+    DCHECK(pred_bb != nullptr);
+    DCHECK(pred_bb->data_flow_info != nullptr);
+    DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+    if (pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] != s_reg) {
+      return false;
+    }
+  }
+  return true;
+}
+
+TypeInference::TypeInference(MIRGraph* mir_graph, ScopedArenaAllocator* alloc)
+    : mir_graph_(mir_graph),
+      cu_(mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit()),
+      check_cast_data_(!mir_graph->HasCheckCast() ? nullptr :
+          InitializeCheckCastData(mir_graph, alloc)),
+      num_sregs_(
+          check_cast_data_ != nullptr ? check_cast_data_->NumSRegs() : mir_graph->GetNumSSARegs()),
+      ifields_(mir_graph->GetIFieldLoweringInfoCount() == 0u ? nullptr :
+          PrepareIFieldTypes(cu_->dex_file, mir_graph, alloc)),
+      sfields_(mir_graph->GetSFieldLoweringInfoCount() == 0u ? nullptr :
+          PrepareSFieldTypes(cu_->dex_file, mir_graph, alloc)),
+      signatures_(mir_graph->GetMethodLoweringInfoCount() == 0u ? nullptr :
+          PrepareSignatures(cu_->dex_file, mir_graph, alloc)),
+      current_method_signature_(
+          Signature(cu_->dex_file, cu_->method_idx, (cu_->access_flags & kAccStatic) != 0, alloc)),
+      sregs_(alloc->AllocArray<Type>(num_sregs_, kArenaAllocMisc)),
+      bb_df_attrs_(alloc->AllocArray<uint64_t>(mir_graph->GetNumBlocks(), kArenaAllocDFInfo)) {
+  InitializeSRegs();
+}
+
+bool TypeInference::Apply(BasicBlock* bb) {
+  bool changed = false;
+  uint64_t bb_df_attrs = bb_df_attrs_[bb->id];
+  if (bb_df_attrs != 0u) {
+    if (UNLIKELY(check_cast_data_ != nullptr)) {
+      check_cast_data_->Start(bb);
+      if (bb_df_attrs & DF_NULL_TRANSFER_N) {
+        changed |= check_cast_data_->ProcessPseudoPhis(bb, sregs_);
+      }
+    }
+    MIR* mir = bb->first_mir_insn;
+    MIR* main_mirs_end = ((bb_df_attrs & DF_SAME_TYPE_AB) != 0u) ? bb->last_mir_insn : nullptr;
+    for (; mir != main_mirs_end && static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi;
+        mir = mir->next) {
+      // Special-case handling for Phi comes first because a wide value is represented
+      // by 2 narrow Phis rather than by a single wide one.
+      // At least one input must have been previously processed. Look for the first
+      // occurrence of a high_word or low_word flag to determine the type.
+      size_t num_uses = mir->ssa_rep->num_uses;
+      const int32_t* uses = mir->ssa_rep->uses;
+      const int32_t* defs = mir->ssa_rep->defs;
+      DCHECK_EQ(bb->predecessors.size(), num_uses);
+      Type merged_type = sregs_[defs[0]];
+      for (size_t pred_idx = 0; pred_idx != num_uses; ++pred_idx) {
+        int32_t input_mod_s_reg = PhiInputModifiedSReg(uses[pred_idx], bb, pred_idx);
+        merged_type.MergeWeak(sregs_[input_mod_s_reg]);
+      }
+      if (UNLIKELY(!merged_type.IsDefined())) {
+        // No change
+      } else if (merged_type.HighWord()) {
+        // Ignore the high word phi, just remember if there's a size mismatch.
+        if (UNLIKELY(merged_type.LowWord())) {
+          sregs_[defs[0]].MarkSizeConflict();
+        }
+      } else {
+        // Propagate both down (fully) and up (without the "non-null" flag).
+        changed |= sregs_[defs[0]].Copy(merged_type);
+        merged_type = merged_type.AsNull();
+        for (size_t pred_idx = 0; pred_idx != num_uses; ++pred_idx) {
+          int32_t input_mod_s_reg = PhiInputModifiedSReg(uses[pred_idx], bb, pred_idx);
+          changed |= UpdateSRegFromLowWordType(input_mod_s_reg, merged_type);
+        }
+      }
+    }
+
+    // Propagate types with MOVEs and AGETs, process CHECK_CASTs for modified SSA reg tracking.
+    for (; mir != main_mirs_end; mir = mir->next) {
+      uint64_t attrs = MIRGraph::GetDataFlowAttributes(mir);
+      size_t num_uses = mir->ssa_rep->num_uses;
+      const int32_t* uses = mir->ssa_rep->uses;
+      const int32_t* defs = mir->ssa_rep->defs;
+
+      // Special handling for moves. Propagate type both ways.
+      if ((attrs & DF_IS_MOVE) != 0) {
+        int32_t used_mod_s_reg = ModifiedSReg(uses[0]);
+        int32_t defd_mod_s_reg = defs[0];
+
+        // The "non-null" flag is propagated only downwards from actual definitions and it's
+        // not initially marked for moves, so used sreg must be marked before defined sreg.
+        // The only exception is an inlined move where we know the type from the original invoke.
+        DCHECK(sregs_[used_mod_s_reg].NonNull() || !sregs_[defd_mod_s_reg].NonNull() ||
+               (mir->optimization_flags & MIR_CALLEE) != 0);
+        changed |= UpdateSRegFromLowWordType(used_mod_s_reg, sregs_[defd_mod_s_reg].AsNull());
+
+        // The value is the same, so either both registers are null or neither is.
+        // In any case we can safely propagate the array type down.
+        changed |= UpdateSRegFromLowWordType(defd_mod_s_reg, sregs_[used_mod_s_reg]);
+        if (UNLIKELY((attrs & DF_REF_A) == 0 && sregs_[used_mod_s_reg].Ref())) {
+          // Mark type conflict: move instead of move-object.
+          sregs_[used_mod_s_reg].MarkTypeConflict();
+        }
+        continue;
+      }
+
+      // Handle AGET/APUT.
+      if ((attrs & DF_HAS_RANGE_CHKS) != 0) {
+        int32_t base_mod_s_reg = ModifiedSReg(uses[num_uses - 2u]);
+        int32_t mod_s_reg = (attrs & DF_DA) != 0 ? defs[0] : ModifiedSReg(uses[0]);
+        DCHECK_NE(sregs_[base_mod_s_reg].ArrayDepth(), 0u);
+        if (!sregs_[base_mod_s_reg].NonNull()) {
+          // If the base is null, don't propagate anything. All that we could determine
+          // has already been merged in the previous stage.
+        } else {
+          changed |= UpdateSRegFromLowWordType(mod_s_reg, sregs_[base_mod_s_reg].ComponentType());
+          Type array_type = Type::ArrayTypeFromComponent(sregs_[mod_s_reg]);
+          if ((attrs & DF_DA) != 0) {
+            changed |= sregs_[base_mod_s_reg].MergeStrong(array_type);
+          } else {
+            changed |= sregs_[base_mod_s_reg].MergeWeak(array_type);
+          }
+        }
+        if (UNLIKELY((attrs & DF_REF_A) == 0 && sregs_[mod_s_reg].Ref())) {
+          // Mark type conflict: aget/aput instead of aget/aput-object.
+          sregs_[mod_s_reg].MarkTypeConflict();
+        }
+        continue;
+      }
+
+      // Special-case handling for check-cast to advance modified SSA reg.
+      if (UNLIKELY((attrs & DF_CHK_CAST) != 0)) {
+        DCHECK(check_cast_data_ != nullptr);
+        check_cast_data_->ProcessCheckCast(mir);
+      }
+    }
+
+    // Propagate types for IF_cc if present.
+    if (mir != nullptr) {
+      DCHECK(mir == bb->last_mir_insn);
+      DCHECK(mir->next == nullptr);
+      DCHECK_NE(MIRGraph::GetDataFlowAttributes(mir) & DF_SAME_TYPE_AB, 0u);
+      DCHECK_EQ(mir->ssa_rep->num_uses, 2u);
+      const int32_t* uses = mir->ssa_rep->uses;
+      int32_t mod_s_reg0 = ModifiedSReg(uses[0]);
+      int32_t mod_s_reg1 = ModifiedSReg(uses[1]);
+      changed |= sregs_[mod_s_reg0].MergeWeak(sregs_[mod_s_reg1].AsNull());
+      changed |= sregs_[mod_s_reg1].MergeWeak(sregs_[mod_s_reg0].AsNull());
+    }
+  }
+  return changed;
+}
+
+void TypeInference::Finish() {
+  if (UNLIKELY(check_cast_data_ != nullptr)) {
+    check_cast_data_->MergeCheckCastConflicts(sregs_);
+  }
+
+  size_t num_sregs = mir_graph_->GetNumSSARegs();  // Without the extra SSA regs.
+  for (size_t s_reg = 0; s_reg != num_sregs; ++s_reg) {
+    if (sregs_[s_reg].SizeConflict()) {
+      /*
+       * The dex bytecode definition does not explicitly outlaw using the same virtual
+       * register in both a 32-bit and a 64-bit pair context.  However, dx does not
+       * generate this pattern (at least not in recent versions).  Further, the next
+       * revision of dex will forbid this.  To support the few cases in the wild, detect
+       * this pattern and punt to the interpreter.
+       */
+      LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+                   << " has size conflict block for sreg " << s_reg
+                   << ", punting to interpreter.";
+      mir_graph_->SetPuntToInterpreter(true);
+      return;
+    }
+  }
+
+  size_t conflict_s_reg = 0;
+  bool type_conflict = false;
+  for (size_t s_reg = 0; s_reg != num_sregs; ++s_reg) {
+    Type type = sregs_[s_reg];
+    RegLocation* loc = &mir_graph_->reg_location_[s_reg];
+    loc->wide = type.Wide();
+    loc->defined = type.IsDefined();
+    loc->fp = type.Fp();
+    loc->core = type.Core();
+    loc->ref = type.Ref();
+    loc->high_word = type.HighWord();
+    if (UNLIKELY(type.TypeConflict())) {
+      type_conflict = true;
+      conflict_s_reg = s_reg;
+    }
+  }
+
+  if (type_conflict) {
+    /*
+     * We don't normally expect to see a Dalvik register definition used both as a
+     * floating point and core value, though technically it could happen with constants.
+     * Until we have proper typing, detect this situation and disable register promotion
+     * (which relies on the distinction between core and fp usages).
+     */
+    LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+                 << " has type conflict block for sreg " << conflict_s_reg
+                 << ", disabling register promotion.";
+    cu_->disable_opt |= (1 << kPromoteRegs);
+  }
+}
+
+TypeInference::Type TypeInference::FieldType(const DexFile* dex_file, uint32_t field_idx) {
+  uint32_t type_idx = dex_file->GetFieldId(field_idx).type_idx_;
+  Type result = Type::DexType(dex_file, type_idx);
+  return result;
+}
+
+TypeInference::Type* TypeInference::PrepareIFieldTypes(const DexFile* dex_file,
+                                                       MIRGraph* mir_graph,
+                                                       ScopedArenaAllocator* alloc) {
+  size_t count = mir_graph->GetIFieldLoweringInfoCount();
+  Type* ifields = alloc->AllocArray<Type>(count, kArenaAllocDFInfo);
+  for (uint32_t i = 0u; i != count; ++i) {
+    // NOTE: Quickened field accesses have invalid FieldIndex() but they are always resolved.
+    const MirFieldInfo& info = mir_graph->GetIFieldLoweringInfo(i);
+    const DexFile* current_dex_file = info.IsResolved() ? info.DeclaringDexFile() : dex_file;
+    uint32_t field_idx = info.IsResolved() ? info.DeclaringFieldIndex() : info.FieldIndex();
+    ifields[i] = FieldType(current_dex_file, field_idx);
+    DCHECK_EQ(info.MemAccessType() == kDexMemAccessWide, ifields[i].Wide());
+    DCHECK_EQ(info.MemAccessType() == kDexMemAccessObject, ifields[i].Ref());
+  }
+  return ifields;
+}
+
+TypeInference::Type* TypeInference::PrepareSFieldTypes(const DexFile* dex_file,
+                                                       MIRGraph* mir_graph,
+                                                       ScopedArenaAllocator* alloc) {
+  size_t count = mir_graph->GetSFieldLoweringInfoCount();
+  Type* sfields = alloc->AllocArray<Type>(count, kArenaAllocDFInfo);
+  for (uint32_t i = 0u; i != count; ++i) {
+    // FieldIndex() is always valid for static fields (no quickened instructions).
+    sfields[i] = FieldType(dex_file, mir_graph->GetSFieldLoweringInfo(i).FieldIndex());
+  }
+  return sfields;
+}
+
+TypeInference::MethodSignature TypeInference::Signature(const DexFile* dex_file,
+                                                        uint32_t method_idx,
+                                                        bool is_static,
+                                                        ScopedArenaAllocator* alloc) {
+  const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
+  const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+  Type return_type = Type::DexType(dex_file, proto_id.return_type_idx_);
+  const DexFile::TypeList* type_list = dex_file->GetProtoParameters(proto_id);
+  size_t this_size = (is_static ? 0u : 1u);
+  size_t param_size = ((type_list != nullptr) ? type_list->Size() : 0u);
+  size_t size = this_size + param_size;
+  Type* param_types = (size != 0u) ? alloc->AllocArray<Type>(size, kArenaAllocDFInfo) : nullptr;
+  if (!is_static) {
+    param_types[0] = Type::DexType(dex_file, method_id.class_idx_);
+  }
+  for (size_t i = 0; i != param_size; ++i)  {
+    uint32_t type_idx = type_list->GetTypeItem(i).type_idx_;
+    param_types[this_size + i] = Type::DexType(dex_file, type_idx);
+  }
+  return MethodSignature{ return_type, size, param_types };  // NOLINT
+}
+
+TypeInference::MethodSignature* TypeInference::PrepareSignatures(const DexFile* dex_file,
+                                                                 MIRGraph* mir_graph,
+                                                                 ScopedArenaAllocator* alloc) {
+  size_t count = mir_graph->GetMethodLoweringInfoCount();
+  MethodSignature* signatures = alloc->AllocArray<MethodSignature>(count, kArenaAllocDFInfo);
+  for (uint32_t i = 0u; i != count; ++i) {
+    // NOTE: Quickened invokes have invalid MethodIndex() but they are always resolved.
+    const MirMethodInfo& info = mir_graph->GetMethodLoweringInfo(i);
+    uint32_t method_idx = info.IsResolved() ? info.DeclaringMethodIndex() : info.MethodIndex();
+    const DexFile* current_dex_file = info.IsResolved() ? info.DeclaringDexFile() : dex_file;
+    signatures[i] = Signature(current_dex_file, method_idx, info.IsStatic(), alloc);
+  }
+  return signatures;
+}
+
+TypeInference::CheckCastData* TypeInference::InitializeCheckCastData(MIRGraph* mir_graph,
+                                                                     ScopedArenaAllocator* alloc) {
+  if (!mir_graph->HasCheckCast()) {
+    return nullptr;
+  }
+
+  CheckCastData* data = nullptr;
+  const DexFile* dex_file = nullptr;
+  PreOrderDfsIterator iter(mir_graph);
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+      if (mir->dalvikInsn.opcode == Instruction::CHECK_CAST) {
+        if (data == nullptr) {
+          data = new (alloc) CheckCastData(mir_graph, alloc);
+          dex_file = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit()->dex_file;
+        }
+        Type type = Type::DexType(dex_file, mir->dalvikInsn.vB);
+        data->AddCheckCast(mir, type);
+      }
+    }
+  }
+  if (data != nullptr) {
+    data->AddPseudoPhis();
+  }
+  return data;
+}
+
+void TypeInference::InitializeSRegs() {
+  std::fill_n(sregs_, num_sregs_, Type::Unknown());
+
+  // Initialize parameter SSA regs at method entry.
+  int32_t entry_param_s_reg = mir_graph_->GetFirstInVR();
+  for (size_t i = 0, size = current_method_signature_.num_params; i != size; ++i)  {
+    Type param_type = current_method_signature_.param_types[i].AsNonNull();
+    sregs_[entry_param_s_reg] = param_type;
+    entry_param_s_reg += param_type.Wide() ? 2 : 1;
+  }
+  DCHECK_EQ(static_cast<uint32_t>(entry_param_s_reg),
+            mir_graph_->GetFirstInVR() + mir_graph_->GetNumOfInVRs());
+
+  // Initialize check-cast types.
+  if (UNLIKELY(check_cast_data_ != nullptr)) {
+    check_cast_data_->InitializeCheckCastSRegs(sregs_);
+  }
+
+  // Initialize well-known SSA register definition types. Merge inferred types
+  // upwards where a single merge is enough (INVOKE arguments and return type,
+  // RETURN type, IPUT/SPUT source type).
+  // NOTE: Using topological sort order to make sure the definition comes before
+  // any upward merging. This allows simple assignment of the defined types
+  // instead of MergeStrong().
+  TopologicalSortIterator iter(mir_graph_);
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+    uint64_t bb_df_attrs = 0u;
+    if (UNLIKELY(check_cast_data_ != nullptr)) {
+      check_cast_data_->Start(bb);
+    }
+    // Ignore pseudo-phis; we're not setting types for SSA regs that depend on them in this pass.
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+      uint64_t attrs = MIRGraph::GetDataFlowAttributes(mir);
+      bb_df_attrs |= attrs;
+
+      const uint32_t num_uses = mir->ssa_rep->num_uses;
+      const int32_t* uses = mir->ssa_rep->uses;
+      const int32_t* defs = mir->ssa_rep->defs;
+
+      uint16_t opcode = mir->dalvikInsn.opcode;
+      switch (opcode) {
+        case Instruction::CONST_4:
+        case Instruction::CONST_16:
+        case Instruction::CONST:
+        case Instruction::CONST_HIGH16:
+        case Instruction::CONST_WIDE_16:
+        case Instruction::CONST_WIDE_32:
+        case Instruction::CONST_WIDE:
+        case Instruction::CONST_WIDE_HIGH16:
+        case Instruction::MOVE:
+        case Instruction::MOVE_FROM16:
+        case Instruction::MOVE_16:
+        case Instruction::MOVE_WIDE:
+        case Instruction::MOVE_WIDE_FROM16:
+        case Instruction::MOVE_WIDE_16:
+        case Instruction::MOVE_OBJECT:
+        case Instruction::MOVE_OBJECT_FROM16:
+        case Instruction::MOVE_OBJECT_16:
+          if ((mir->optimization_flags & MIR_CALLEE) != 0) {
+            // Inlined const/move keeps method_lowering_info for type inference.
+            DCHECK_LT(mir->meta.method_lowering_info, mir_graph_->GetMethodLoweringInfoCount());
+            Type return_type = signatures_[mir->meta.method_lowering_info].return_type;
+            DCHECK(return_type.IsDefined());  // Method return type can't be void.
+            sregs_[defs[0]] = return_type.AsNonNull();
+            if (return_type.Wide()) {
+              DCHECK_EQ(defs[0] + 1, defs[1]);
+              sregs_[defs[1]] = return_type.ToHighWord();
+            }
+            break;
+          }
+          FALLTHROUGH_INTENDED;
+        case kMirOpPhi:
+          // These cannot be determined in this simple pass and will be processed later.
+          break;
+
+        case Instruction::MOVE_RESULT:
+        case Instruction::MOVE_RESULT_WIDE:
+        case Instruction::MOVE_RESULT_OBJECT:
+          // Nothing to do, handled with invoke-* or filled-new-array/-range.
+          break;
+        case Instruction::MOVE_EXCEPTION:
+          // NOTE: We can never catch an array.
+          sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
+          break;
+        case Instruction::CONST_STRING:
+        case Instruction::CONST_STRING_JUMBO:
+          sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
+          break;
+        case Instruction::CONST_CLASS:
+          sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
+          break;
+        case Instruction::CHECK_CAST:
+          DCHECK(check_cast_data_ != nullptr);
+          check_cast_data_->ProcessCheckCast(mir);
+          break;
+        case Instruction::ARRAY_LENGTH:
+          sregs_[ModifiedSReg(uses[0])].MergeStrong(Type::UnknownArrayType());
+          break;
+        case Instruction::NEW_INSTANCE:
+          sregs_[defs[0]] = Type::DexType(cu_->dex_file, mir->dalvikInsn.vB).AsNonNull();
+          DCHECK(sregs_[defs[0]].Ref());
+          DCHECK_EQ(sregs_[defs[0]].ArrayDepth(), 0u);
+          break;
+        case Instruction::NEW_ARRAY:
+          sregs_[defs[0]] = Type::DexType(cu_->dex_file, mir->dalvikInsn.vC).AsNonNull();
+          DCHECK(sregs_[defs[0]].Ref());
+          DCHECK_NE(sregs_[defs[0]].ArrayDepth(), 0u);
+          break;
+        case Instruction::FILLED_NEW_ARRAY:
+        case Instruction::FILLED_NEW_ARRAY_RANGE: {
+          Type array_type = Type::DexType(cu_->dex_file, mir->dalvikInsn.vB);
+          array_type.CheckPureRef();  // Previously checked by the method verifier.
+          DCHECK_NE(array_type.ArrayDepth(), 0u);
+          Type component_type = array_type.ComponentType();
+          DCHECK(!component_type.Wide());
+          MIR* move_result_mir = mir_graph_->FindMoveResult(bb, mir);
+          if (move_result_mir != nullptr) {
+            DCHECK_EQ(move_result_mir->dalvikInsn.opcode, Instruction::MOVE_RESULT_OBJECT);
+            sregs_[move_result_mir->ssa_rep->defs[0]] = array_type.AsNonNull();
+          }
+          DCHECK_EQ(num_uses, mir->dalvikInsn.vA);
+          for (size_t next = 0u; next != num_uses; ++next) {
+            int32_t input_mod_s_reg = ModifiedSReg(uses[next]);
+            sregs_[input_mod_s_reg].MergeStrong(component_type);
+          }
+          break;
+        }
+        case Instruction::INVOKE_VIRTUAL:
+        case Instruction::INVOKE_SUPER:
+        case Instruction::INVOKE_DIRECT:
+        case Instruction::INVOKE_STATIC:
+        case Instruction::INVOKE_INTERFACE:
+        case Instruction::INVOKE_VIRTUAL_RANGE:
+        case Instruction::INVOKE_SUPER_RANGE:
+        case Instruction::INVOKE_DIRECT_RANGE:
+        case Instruction::INVOKE_STATIC_RANGE:
+        case Instruction::INVOKE_INTERFACE_RANGE:
+        case Instruction::INVOKE_VIRTUAL_QUICK:
+        case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
+          const MethodSignature* signature = &signatures_[mir->meta.method_lowering_info];
+          MIR* move_result_mir = mir_graph_->FindMoveResult(bb, mir);
+          if (move_result_mir != nullptr) {
+            Type return_type = signature->return_type;
+            sregs_[move_result_mir->ssa_rep->defs[0]] = return_type.AsNonNull();
+            if (return_type.Wide()) {
+              DCHECK_EQ(move_result_mir->ssa_rep->defs[0] + 1, move_result_mir->ssa_rep->defs[1]);
+              sregs_[move_result_mir->ssa_rep->defs[1]] = return_type.ToHighWord();
+            }
+          }
+          size_t next = 0u;
+          for (size_t i = 0, size = signature->num_params; i != size; ++i)  {
+            Type param_type = signature->param_types[i];
+            int32_t param_s_reg = ModifiedSReg(uses[next]);
+            DCHECK(!param_type.Wide() || uses[next] + 1 == uses[next + 1]);
+            UpdateSRegFromLowWordType(param_s_reg, param_type);
+            next += param_type.Wide() ? 2 : 1;
+          }
+          DCHECK_EQ(next, num_uses);
+          DCHECK_EQ(next, mir->dalvikInsn.vA);
+          break;
+        }
+
+        case Instruction::RETURN_WIDE:
+          DCHECK(current_method_signature_.return_type.Wide());
+          DCHECK_EQ(uses[0] + 1, uses[1]);
+          DCHECK_EQ(ModifiedSReg(uses[0]), uses[0]);
+          FALLTHROUGH_INTENDED;
+        case Instruction::RETURN:
+        case Instruction::RETURN_OBJECT: {
+          int32_t mod_s_reg = ModifiedSReg(uses[0]);
+          UpdateSRegFromLowWordType(mod_s_reg, current_method_signature_.return_type);
+          break;
+        }
+
+        // NOTE: For AGET/APUT we set only the array type. The operand type is set
+        // below based on the data flow attributes.
+        case Instruction::AGET:
+        case Instruction::APUT:
+          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::NarrowArrayType());
+          break;
+        case Instruction::AGET_WIDE:
+        case Instruction::APUT_WIDE:
+          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::WideArrayType());
+          break;
+        case Instruction::AGET_OBJECT:
+          sregs_[defs[0]] = sregs_[defs[0]].AsNonNull();
+          FALLTHROUGH_INTENDED;
+        case Instruction::APUT_OBJECT:
+          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::ObjectArrayType());
+          break;
+        case Instruction::AGET_BOOLEAN:
+        case Instruction::APUT_BOOLEAN:
+        case Instruction::AGET_BYTE:
+        case Instruction::APUT_BYTE:
+        case Instruction::AGET_CHAR:
+        case Instruction::APUT_CHAR:
+        case Instruction::AGET_SHORT:
+        case Instruction::APUT_SHORT:
+          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::NarrowCoreArrayType());
+          break;
+
+        case Instruction::IGET_WIDE:
+        case Instruction::IGET_WIDE_QUICK:
+          DCHECK_EQ(defs[0] + 1, defs[1]);
+          DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+          sregs_[defs[1]] = ifields_[mir->meta.ifield_lowering_info].ToHighWord();
+          FALLTHROUGH_INTENDED;
+        case Instruction::IGET:
+        case Instruction::IGET_OBJECT:
+        case Instruction::IGET_BOOLEAN:
+        case Instruction::IGET_BYTE:
+        case Instruction::IGET_CHAR:
+        case Instruction::IGET_SHORT:
+        case Instruction::IGET_QUICK:
+        case Instruction::IGET_OBJECT_QUICK:
+        case Instruction::IGET_BOOLEAN_QUICK:
+        case Instruction::IGET_BYTE_QUICK:
+        case Instruction::IGET_CHAR_QUICK:
+        case Instruction::IGET_SHORT_QUICK:
+          DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+          sregs_[defs[0]] = ifields_[mir->meta.ifield_lowering_info].AsNonNull();
+          break;
+        case Instruction::IPUT_WIDE:
+        case Instruction::IPUT_WIDE_QUICK:
+          DCHECK_EQ(uses[0] + 1, uses[1]);
+          FALLTHROUGH_INTENDED;
+        case Instruction::IPUT:
+        case Instruction::IPUT_OBJECT:
+        case Instruction::IPUT_BOOLEAN:
+        case Instruction::IPUT_BYTE:
+        case Instruction::IPUT_CHAR:
+        case Instruction::IPUT_SHORT:
+        case Instruction::IPUT_QUICK:
+        case Instruction::IPUT_OBJECT_QUICK:
+        case Instruction::IPUT_BOOLEAN_QUICK:
+        case Instruction::IPUT_BYTE_QUICK:
+        case Instruction::IPUT_CHAR_QUICK:
+        case Instruction::IPUT_SHORT_QUICK:
+          DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+          UpdateSRegFromLowWordType(ModifiedSReg(uses[0]),
+                                    ifields_[mir->meta.ifield_lowering_info]);
+          break;
+        case Instruction::SGET_WIDE:
+          DCHECK_EQ(defs[0] + 1, defs[1]);
+          DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+          sregs_[defs[1]] = sfields_[mir->meta.sfield_lowering_info].ToHighWord();
+          FALLTHROUGH_INTENDED;
+        case Instruction::SGET:
+        case Instruction::SGET_OBJECT:
+        case Instruction::SGET_BOOLEAN:
+        case Instruction::SGET_BYTE:
+        case Instruction::SGET_CHAR:
+        case Instruction::SGET_SHORT:
+          DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+          sregs_[defs[0]] = sfields_[mir->meta.sfield_lowering_info].AsNonNull();
+          break;
+        case Instruction::SPUT_WIDE:
+          DCHECK_EQ(uses[0] + 1, uses[1]);
+          FALLTHROUGH_INTENDED;
+        case Instruction::SPUT:
+        case Instruction::SPUT_OBJECT:
+        case Instruction::SPUT_BOOLEAN:
+        case Instruction::SPUT_BYTE:
+        case Instruction::SPUT_CHAR:
+        case Instruction::SPUT_SHORT:
+          DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+          UpdateSRegFromLowWordType(ModifiedSReg(uses[0]),
+                                          sfields_[mir->meta.sfield_lowering_info]);
+          break;
+
+        default:
+          // No invokes or reference definitions here.
+          DCHECK_EQ(attrs & (DF_FORMAT_35C | DF_FORMAT_3RC), 0u);
+          DCHECK_NE(attrs & (DF_DA | DF_REF_A), (DF_DA | DF_REF_A));
+          break;
+      }
+
+      if ((attrs & DF_NULL_TRANSFER_N) != 0) {
+        // Don't process Phis at this stage.
+        continue;
+      }
+
+      // Handle defs
+      if (attrs & DF_DA) {
+        int32_t s_reg = defs[0];
+        sregs_[s_reg].SetLowWord();
+        if (attrs & DF_FP_A) {
+          sregs_[s_reg].SetFp();
+        }
+        if (attrs & DF_CORE_A) {
+          sregs_[s_reg].SetCore();
+        }
+        if (attrs & DF_REF_A) {
+          sregs_[s_reg].SetRef();
+        }
+        if (attrs & DF_A_WIDE) {
+          sregs_[s_reg].SetWide();
+          DCHECK_EQ(s_reg + 1, ModifiedSReg(defs[1]));
+          sregs_[s_reg + 1].MergeHighWord(sregs_[s_reg]);
+        } else {
+          sregs_[s_reg].SetNarrow();
+        }
+      }
+
+      // Handle uses
+      size_t next = 0;
+  #define PROCESS(REG)                                                        \
+      if (attrs & DF_U##REG) {                                                \
+        int32_t mod_s_reg = ModifiedSReg(uses[next]);                         \
+        sregs_[mod_s_reg].SetLowWord();                                       \
+        if (attrs & DF_FP_##REG) {                                            \
+          sregs_[mod_s_reg].SetFp();                                          \
+        }                                                                     \
+        if (attrs & DF_CORE_##REG) {                                          \
+          sregs_[mod_s_reg].SetCore();                                        \
+        }                                                                     \
+        if (attrs & DF_REF_##REG) {                                           \
+          sregs_[mod_s_reg].SetRef();                                         \
+        }                                                                     \
+        if (attrs & DF_##REG##_WIDE) {                                        \
+          sregs_[mod_s_reg].SetWide();                                        \
+          DCHECK_EQ(mod_s_reg + 1, ModifiedSReg(uses[next + 1]));             \
+          sregs_[mod_s_reg + 1].SetWide();                                    \
+          sregs_[mod_s_reg + 1].MergeHighWord(sregs_[mod_s_reg]);             \
+          next += 2;                                                          \
+        } else {                                                              \
+          sregs_[mod_s_reg].SetNarrow();                                      \
+          next++;                                                             \
+        }                                                                     \
+      }
+      PROCESS(A)
+      PROCESS(B)
+      PROCESS(C)
+  #undef PROCESS
+      DCHECK(next == mir->ssa_rep->num_uses || (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC)) != 0);
+    }
+    // Record relevant attributes.
+    bb_df_attrs_[bb->id] = bb_df_attrs &
+        (DF_NULL_TRANSFER_N | DF_CHK_CAST | DF_IS_MOVE | DF_HAS_RANGE_CHKS | DF_SAME_TYPE_AB);
+  }
+
+  if (UNLIKELY(check_cast_data_ != nullptr)) {
+    check_cast_data_->MarkPseudoPhiBlocks(bb_df_attrs_);
+  }
+}
+
+int32_t TypeInference::ModifiedSReg(int32_t s_reg) {
+  if (UNLIKELY(check_cast_data_ != nullptr)) {
+    SplitSRegData* split_data = check_cast_data_->GetSplitSRegData(s_reg);
+    if (UNLIKELY(split_data != nullptr)) {
+      DCHECK_NE(split_data->current_mod_s_reg, INVALID_SREG);
+      return split_data->current_mod_s_reg;
+    }
+  }
+  return s_reg;
+}
+
+int32_t TypeInference::PhiInputModifiedSReg(int32_t s_reg, BasicBlock* bb, size_t pred_idx) {
+  DCHECK_LT(pred_idx, bb->predecessors.size());
+  if (UNLIKELY(check_cast_data_ != nullptr)) {
+    SplitSRegData* split_data = check_cast_data_->GetSplitSRegData(s_reg);
+    if (UNLIKELY(split_data != nullptr)) {
+      return split_data->ending_mod_s_reg[bb->predecessors[pred_idx]];
+    }
+  }
+  return s_reg;
+}
+
+bool TypeInference::UpdateSRegFromLowWordType(int32_t mod_s_reg, Type low_word_type) {
+  DCHECK(low_word_type.LowWord());
+  bool changed = sregs_[mod_s_reg].MergeStrong(low_word_type);
+  if (!sregs_[mod_s_reg].Narrow()) {  // Wide without conflict with narrow.
+    DCHECK(!low_word_type.Narrow());
+    DCHECK_LT(mod_s_reg, mir_graph_->GetNumSSARegs());  // Original SSA reg.
+    changed |= sregs_[mod_s_reg + 1].MergeHighWord(sregs_[mod_s_reg]);
+  }
+  return changed;
+}
+
+}  // namespace art
diff --git a/compiler/dex/type_inference.h b/compiler/dex/type_inference.h
new file mode 100644
index 0000000..c9b29bf
--- /dev/null
+++ b/compiler/dex/type_inference.h
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_TYPE_INFERENCE_H_
+#define ART_COMPILER_DEX_TYPE_INFERENCE_H_
+
+#include "base/logging.h"
+#include "base/arena_object.h"
+#include "base/scoped_arena_containers.h"
+
+namespace art {
+
+class ArenaBitVector;
+class BasicBlock;
+struct CompilationUnit;
+class DexFile;
+class MirFieldInfo;
+class MirMethodInfo;
+class MIR;
+class MIRGraph;
+
+/**
+ * @brief Determine the type of SSA registers.
+ *
+ * @details
+ * Because Dalvik's bytecode is not fully typed, we have to do some work to figure
+ * out the sreg type.  For some operations it is clear from the opcode (e.g.
+ * ADD_FLOAT v0, v1, v2), but for others (MOVE), we may never know the "real" type.
+ *
+ * We perform the type inference operation in two phases:
+ *   1. First, we make one pass over all insns in the topological sort order and
+ *      extract the known type information for their defs and uses.
+ *   2. Then we repeatedly go through the graph to process insns that can propagate
+ *      types from inputs to outputs and vice versa. These insns are just the MOVEs,
+ *      AGET/APUTs, IF_ccs and Phis (including pseudo-Phis, see below).
+ *
+ * Since the main purpose is to determine the basic FP/core/reference type, we don't
+ * need to record the precise reference type; we only record the array type, which
+ * determines the result types of agets and the source types of aputs.
+ *
+ * One complication is the check-cast instruction that effectively defines a new
+ * virtual register that has a different type than the original sreg. We need to
+ * track these virtual sregs and insert pseudo-phis where they merge.
+ *
+ * Another problem is with null references. The same zero constant can be used
+ * as a differently typed null and moved around with move-object, which would
+ * normally be an ill-formed assignment. So we need to keep track of values that
+ * can be null and values that cannot.
+ *
+ * Note that it's possible to have the same sreg show multiple defined types because dx
+ * treats constants as untyped bit patterns. We disable register promotion in that case.
+ */
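+// A minimal driver sketch (the repeating pre-order traversal and allocator
+// setup here are assumptions; the actual pass wiring lives elsewhere in the
+// MIR SSA transformation code):
+//
+//   ScopedArenaAllocator allocator(&cu->arena_stack);
+//   TypeInference* ti = new (&allocator) TypeInference(mir_graph, &allocator);
+//   RepeatingPreOrderDfsIterator iter(mir_graph);
+//   bool changed = false;
+//   for (BasicBlock* bb = iter.Next(false); bb != nullptr; bb = iter.Next(changed)) {
+//     changed = ti->Apply(bb);  // Repeat until no sreg type changes.
+//   }
+//   ti->Finish();  // Write results to reg locations and detect conflicts.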
+class TypeInference : public DeletableArenaObject<kArenaAllocMisc> {
+ public:
+  TypeInference(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
+  bool Apply(BasicBlock* bb);
+  void Finish();
+
+ private:
+  struct Type {
+    static Type Unknown() {
+      return Type(0u);
+    }
+
+    static Type NonArrayRefType() {
+      return Type(kFlagLowWord | kFlagNarrow | kFlagRef);
+    }
+
+    static Type ObjectArrayType() {
+      return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+                  (1u << kBitArrayDepthStart) | kFlagArrayNarrow | kFlagArrayRef);
+    }
+
+    static Type WideArrayType() {
+      // Core or FP unknown.
+      return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+                  (1u << kBitArrayDepthStart) | kFlagArrayWide);
+    }
+
+    static Type NarrowArrayType() {
+      // Core or FP unknown.
+      return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+                  (1u << kBitArrayDepthStart) | kFlagArrayNarrow);
+    }
+
+    static Type NarrowCoreArrayType() {
+      return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+                  (1u << kBitArrayDepthStart) | kFlagArrayNarrow | kFlagArrayCore);
+    }
+
+    static Type UnknownArrayType() {
+      return Type(kFlagNarrow | kFlagRef | kFlagLowWord | (1u << kBitArrayDepthStart));
+    }
+
+    static Type ArrayType(uint32_t array_depth, Type nested_type);
+    static Type ArrayTypeFromComponent(Type component_type);
+    static Type ShortyType(char shorty);
+    static Type DexType(const DexFile* dex_file, uint32_t type_idx);
+
+    bool IsDefined() {
+      return raw_bits_ != 0u;
+    }
+
+    bool SizeConflict() const {
+      // NOTE: Ignore array element conflicts that don't propagate to direct conflicts.
+      return (Wide() && Narrow()) || (HighWord() && LowWord());
+    }
+
+    bool TypeConflict() const {
+      // NOTE: Ignore array element conflicts that don't propagate to direct conflicts.
+      return (raw_bits_ & kMaskType) != 0u && !IsPowerOfTwo(raw_bits_ & kMaskType);  // 2+ bits.
+    }
+
+    void MarkSizeConflict() {
+      SetBits(kFlagLowWord | kFlagHighWord);
+    }
+
+    void MarkTypeConflict() {
+      // Mark all three type bits so that merging any other type bits will not change this type.
+      SetBits(kFlagFp | kFlagCore | kFlagRef);
+    }
+
+    void CheckPureRef() const {
+      DCHECK_EQ(raw_bits_ & (kMaskWideAndType | kMaskWord), kFlagNarrow | kFlagRef | kFlagLowWord);
+    }
+
+    // If set for a reference, don't treat it as possibly null and require a precise type.
+    //
+    // References without this flag are allowed to have a type conflict and their
+    // type will not be propagated down. However, for simplicity we allow propagation
+    // of other flags up as it will affect only other null references; should those
+    // references be marked non-null later, we would have to do it anyway.
+    // NOTE: This is a negative "non-null" flag rather than a positive "is-null"
+    // to simplify merging together with other non-array flags.
+    bool NonNull() const {
+      return IsBitSet(kFlagNonNull);
+    }
+
+    bool Wide() const {
+      return IsBitSet(kFlagWide);
+    }
+
+    bool Narrow() const {
+      return IsBitSet(kFlagNarrow);
+    }
+
+    bool Fp() const {
+      return IsBitSet(kFlagFp);
+    }
+
+    bool Core() const {
+      return IsBitSet(kFlagCore);
+    }
+
+    bool Ref() const {
+      return IsBitSet(kFlagRef);
+    }
+
+    bool LowWord() const {
+      return IsBitSet(kFlagLowWord);
+    }
+
+    bool HighWord() const {
+      return IsBitSet(kFlagHighWord);
+    }
+
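+    // The bits from kBitArrayDepthStart up hold the array depth. For example, for
+    // the type of [[F, ArrayDepth() is 2u, NestedType() is the narrow fp type F,
+    // and ComponentType() is the possibly-null array type [F.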
+    uint32_t ArrayDepth() const {
+      return raw_bits_ >> kBitArrayDepthStart;
+    }
+
+    Type NestedType() const {
+      DCHECK_NE(ArrayDepth(), 0u);
+      return Type(kFlagLowWord | ((raw_bits_ & kMaskArrayWideAndType) >> kArrayTypeShift));
+    }
+
+    Type ComponentType() const {
+      DCHECK_NE(ArrayDepth(), 0u);
+      Type temp(raw_bits_ - (1u << kBitArrayDepthStart));  // array_depth - 1u;
+      return (temp.ArrayDepth() != 0u) ? temp.AsNull() : NestedType();
+    }
+
+    void SetWide() {
+      SetBits(kFlagWide);
+    }
+
+    void SetNarrow() {
+      SetBits(kFlagNarrow);
+    }
+
+    void SetFp() {
+      SetBits(kFlagFp);
+    }
+
+    void SetCore() {
+      SetBits(kFlagCore);
+    }
+
+    void SetRef() {
+      SetBits(kFlagRef);
+    }
+
+    void SetLowWord() {
+      SetBits(kFlagLowWord);
+    }
+
+    void SetHighWord() {
+      SetBits(kFlagHighWord);
+    }
+
+    Type ToHighWord() const {
+      DCHECK_EQ(raw_bits_ & (kMaskWide | kMaskWord), kFlagWide | kFlagLowWord);
+      return Type(raw_bits_ ^ (kFlagLowWord | kFlagHighWord));
+    }
+
+    bool MergeHighWord(Type low_word_type) {
+      // NOTE: low_word_type may be also Narrow() or HighWord().
+      DCHECK(low_word_type.Wide() && low_word_type.LowWord());
+      return MergeBits(Type(low_word_type.raw_bits_ | kFlagHighWord),
+                       kMaskWideAndType | kFlagHighWord);
+    }
+
+    bool Copy(Type type) {
+      if (raw_bits_ != type.raw_bits_) {
+        raw_bits_ = type.raw_bits_;
+        return true;
+      }
+      return false;
+    }
+
+    // Merge non-array flags.
+    bool MergeNonArrayFlags(Type src_type) {
+      return MergeBits(src_type, kMaskNonArray);
+    }
+
+    // Merge array flags for conflict.
+    bool MergeArrayConflict(Type src_type);
+
+    // Merge all flags (strong merge).
+    bool MergeStrong(Type src_type);
+
+    // Merge all flags (weak merge).
+    bool MergeWeak(Type src_type);
+
+    // Get the same type but mark that it should not be treated as null.
+    Type AsNonNull() const {
+      return Type(raw_bits_ | kFlagNonNull);
+    }
+
+    // Get the same type but mark that it can be treated as null.
+    Type AsNull() const {
+      return Type(raw_bits_ & ~kFlagNonNull);
+    }
+
+   private:
+    enum FlagBits {
+      kBitNonNull = 0,
+      kBitWide,
+      kBitNarrow,
+      kBitFp,
+      kBitCore,
+      kBitRef,
+      kBitLowWord,
+      kBitHighWord,
+      kBitArrayWide,
+      kBitArrayNarrow,
+      kBitArrayFp,
+      kBitArrayCore,
+      kBitArrayRef,
+      kBitArrayDepthStart,
+    };
+    static constexpr size_t kArrayDepthBits = sizeof(uint32_t) * 8u - kBitArrayDepthStart;
+
+    static constexpr uint32_t kFlagNonNull = 1u << kBitNonNull;
+    static constexpr uint32_t kFlagWide = 1u << kBitWide;
+    static constexpr uint32_t kFlagNarrow = 1u << kBitNarrow;
+    static constexpr uint32_t kFlagFp = 1u << kBitFp;
+    static constexpr uint32_t kFlagCore = 1u << kBitCore;
+    static constexpr uint32_t kFlagRef = 1u << kBitRef;
+    static constexpr uint32_t kFlagLowWord = 1u << kBitLowWord;
+    static constexpr uint32_t kFlagHighWord = 1u << kBitHighWord;
+    static constexpr uint32_t kFlagArrayWide = 1u << kBitArrayWide;
+    static constexpr uint32_t kFlagArrayNarrow = 1u << kBitArrayNarrow;
+    static constexpr uint32_t kFlagArrayFp = 1u << kBitArrayFp;
+    static constexpr uint32_t kFlagArrayCore = 1u << kBitArrayCore;
+    static constexpr uint32_t kFlagArrayRef = 1u << kBitArrayRef;
+
+    static constexpr uint32_t kMaskWide = kFlagWide | kFlagNarrow;
+    static constexpr uint32_t kMaskType = kFlagFp | kFlagCore | kFlagRef;
+    static constexpr uint32_t kMaskWord = kFlagLowWord | kFlagHighWord;
+    static constexpr uint32_t kMaskArrayWide = kFlagArrayWide | kFlagArrayNarrow;
+    static constexpr uint32_t kMaskArrayType = kFlagArrayFp | kFlagArrayCore | kFlagArrayRef;
+    static constexpr uint32_t kMaskWideAndType = kMaskWide | kMaskType;
+    static constexpr uint32_t kMaskArrayWideAndType = kMaskArrayWide | kMaskArrayType;
+
+    static constexpr size_t kArrayTypeShift = kBitArrayWide - kBitWide;
+    static_assert(kArrayTypeShift == kBitArrayNarrow - kBitNarrow, "shift mismatch");
+    static_assert(kArrayTypeShift == kBitArrayFp - kBitFp, "shift mismatch");
+    static_assert(kArrayTypeShift == kBitArrayCore - kBitCore, "shift mismatch");
+    static_assert(kArrayTypeShift == kBitArrayRef - kBitRef, "shift mismatch");
+    static_assert((kMaskWide << kArrayTypeShift) == kMaskArrayWide, "shift mismatch");
+    static_assert((kMaskType << kArrayTypeShift) == kMaskArrayType, "shift mismatch");
+    static_assert((kMaskWideAndType << kArrayTypeShift) == kMaskArrayWideAndType, "shift mismatch");
+
+    static constexpr uint32_t kMaskArrayDepth = static_cast<uint32_t>(-1) << kBitArrayDepthStart;
+    static constexpr uint32_t kMaskNonArray = ~(kMaskArrayWideAndType | kMaskArrayDepth);
+
+    // The maximum representable array depth. If we exceed the maximum (which can happen
+    // only with an absurd nested array type in a dex file which would presumably cause
+    // OOM while being resolved), we can report false conflicts.
+    static constexpr uint32_t kMaxArrayDepth = static_cast<uint32_t>(-1) >> kBitArrayDepthStart;
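+    // (With the 13 flag bits above this leaves 19 depth bits, so kMaxArrayDepth
+    // is (1u << 19) - 1u, i.e. 524287u.)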
+
+    explicit Type(uint32_t raw_bits) : raw_bits_(raw_bits) { }
+
+    bool IsBitSet(uint32_t flag) const {
+      return (raw_bits_ & flag) != 0u;
+    }
+
+    void SetBits(uint32_t flags) {
+      raw_bits_ |= flags;
+    }
+
+    bool MergeBits(Type src_type, uint32_t mask) {
+      uint32_t new_bits = raw_bits_ | (src_type.raw_bits_ & mask);
+      if (new_bits != raw_bits_) {
+        raw_bits_ = new_bits;
+        return true;
+      }
+      return false;
+    }
+
+    uint32_t raw_bits_;
+  };
+
+  struct MethodSignature {
+    Type return_type;
+    size_t num_params;
+    Type* param_types;
+  };
+
+  struct SplitSRegData {
+    int32_t current_mod_s_reg;
+    int32_t* starting_mod_s_reg;        // Indexed by BasicBlock::id.
+    int32_t* ending_mod_s_reg;          // Indexed by BasicBlock::id.
+
+    // NOTE: Before AddPseudoPhis(), def_phi_blocks_ marks the blocks
+    // with check-casts and the block with the original SSA reg.
+    // After AddPseudoPhis(), it marks blocks with pseudo-phis.
+    ArenaBitVector* def_phi_blocks_;    // Indexed by BasicBlock::id.
+  };
+
+  class CheckCastData : public DeletableArenaObject<kArenaAllocMisc> {
+   public:
+    CheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
+    size_t NumSRegs() const {
+      return num_sregs_;
+    }
+
+    void AddCheckCast(MIR* check_cast, Type type);
+    void AddPseudoPhis();
+    void InitializeCheckCastSRegs(Type* sregs) const;
+    void MergeCheckCastConflicts(Type* sregs) const;
+    void MarkPseudoPhiBlocks(uint64_t* bb_df_attrs) const;
+
+    void Start(BasicBlock* bb);
+    bool ProcessPseudoPhis(BasicBlock* bb, Type* sregs);
+    void ProcessCheckCast(MIR* mir);
+
+    SplitSRegData* GetSplitSRegData(int32_t s_reg);
+
+   private:
+    BasicBlock* FindDefBlock(MIR* check_cast);
+    BasicBlock* FindTopologicallyEarliestPredecessor(BasicBlock* bb);
+    bool IsSRegLiveAtStart(BasicBlock* bb, int v_reg, int32_t s_reg);
+
+    MIRGraph* const mir_graph_;
+    ScopedArenaAllocator* const alloc_;
+    const size_t num_blocks_;
+    size_t num_sregs_;
+
+    // Map check-cast mir to special sreg and type.
+    struct CheckCastMapValue {
+      int32_t modified_s_reg;
+      Type type;
+    };
+    ScopedArenaSafeMap<MIR*, CheckCastMapValue> check_cast_map_;
+    ScopedArenaSafeMap<int32_t, SplitSRegData> split_sreg_data_;
+  };
+
+  static Type FieldType(const DexFile* dex_file, uint32_t field_idx);
+  static Type* PrepareIFieldTypes(const DexFile* dex_file, MIRGraph* mir_graph,
+                                  ScopedArenaAllocator* alloc);
+  static Type* PrepareSFieldTypes(const DexFile* dex_file, MIRGraph* mir_graph,
+                                  ScopedArenaAllocator* alloc);
+  static MethodSignature Signature(const DexFile* dex_file, uint32_t method_idx, bool is_static,
+                                   ScopedArenaAllocator* alloc);
+  static MethodSignature* PrepareSignatures(const DexFile* dex_file, MIRGraph* mir_graph,
+                                            ScopedArenaAllocator* alloc);
+  static CheckCastData* InitializeCheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
+  void InitializeSRegs();
+
+  int32_t ModifiedSReg(int32_t s_reg);
+  int32_t PhiInputModifiedSReg(int32_t s_reg, BasicBlock* bb, size_t pred_idx);
+
+  bool UpdateSRegFromLowWordType(int32_t mod_s_reg, Type low_word_type);
+
+  MIRGraph* const mir_graph_;
+  CompilationUnit* const cu_;
+
+  // The type inference also propagates types backwards, but this must not happen
+  // across a check-cast. So we effectively split an SSA reg in two at each check-cast
+  // and keep track of the types separately.
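+  // Illustrative bytecode:
+  //     move-object v1, v0
+  //     check-cast v0, [I
+  //     aget v2, v0, v3
+  // The aget types v0 as int[] after the cast, but that type must not flow back
+  // through the move into v1, so v0's sreg is split at the check-cast.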
+  std::unique_ptr<CheckCastData> check_cast_data_;
+
+  size_t num_sregs_;      // Number of SSA regs or modified SSA regs, see check-cast.
+  const Type* const ifields_;                 // Indexed by MIR::meta::ifield_lowering_info.
+  const Type* const sfields_;                 // Indexed by MIR::meta::sfield_lowering_info.
+  const MethodSignature* const signatures_;   // Indexed by MIR::meta::method_lowering_info.
+  const MethodSignature current_method_signature_;
+  Type* const sregs_;     // Indexed by SSA reg or modified SSA reg, see check-cast.
+  uint64_t* const bb_df_attrs_;               // Indexed by BasicBlock::id.
+
+  friend class TypeInferenceTest;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_TYPE_INFERENCE_H_
diff --git a/compiler/dex/type_inference_test.cc b/compiler/dex/type_inference_test.cc
new file mode 100644
index 0000000..54b5747f
--- /dev/null
+++ b/compiler/dex/type_inference_test.cc
@@ -0,0 +1,2042 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/logging.h"
+#include "compiler_ir.h"
+#include "dataflow_iterator-inl.h"
+#include "dex_flags.h"
+#include "dex/mir_field_info.h"
+#include "dex/mir_graph.h"
+#include "driver/dex_compilation_unit.h"
+#include "gtest/gtest.h"
+#include "type_inference.h"
+#include "utils/test_dex_file_builder.h"
+
+namespace art {
+
+class TypeInferenceTest : public testing::Test {
+ protected:
+  struct TypeDef {
+    const char* descriptor;
+  };
+
+  struct FieldDef {
+    const char* class_descriptor;
+    const char* type;
+    const char* name;
+  };
+
+  struct MethodDef {
+    const char* class_descriptor;
+    const char* signature;
+    const char* name;
+    InvokeType type;
+  };
+
+  struct BBDef {
+    static constexpr size_t kMaxSuccessors = 4;
+    static constexpr size_t kMaxPredecessors = 4;
+
+    BBType type;
+    size_t num_successors;
+    BasicBlockId successors[kMaxSuccessors];
+    size_t num_predecessors;
+    BasicBlockId predecessors[kMaxPredecessors];
+  };
+
+  struct MIRDef {
+    static constexpr size_t kMaxSsaDefs = 2;
+    static constexpr size_t kMaxSsaUses = 4;
+
+    BasicBlockId bbid;
+    Instruction::Code opcode;
+    int64_t value;
+    uint32_t metadata;
+    size_t num_uses;
+    int32_t uses[kMaxSsaUses];
+    size_t num_defs;
+    int32_t defs[kMaxSsaDefs];
+  };
+
+#define DEF_SUCC0() \
+    0u, { }
+#define DEF_SUCC1(s1) \
+    1u, { s1 }
+#define DEF_SUCC2(s1, s2) \
+    2u, { s1, s2 }
+#define DEF_SUCC3(s1, s2, s3) \
+    3u, { s1, s2, s3 }
+#define DEF_SUCC4(s1, s2, s3, s4) \
+    4u, { s1, s2, s3, s4 }
+#define DEF_PRED0() \
+    0u, { }
+#define DEF_PRED1(p1) \
+    1u, { p1 }
+#define DEF_PRED2(p1, p2) \
+    2u, { p1, p2 }
+#define DEF_PRED3(p1, p2, p3) \
+    3u, { p1, p2, p3 }
+#define DEF_PRED4(p1, p2, p3, p4) \
+    4u, { p1, p2, p3, p4 }
+#define DEF_BB(type, succ, pred) \
+    { type, succ, pred }
+
+#define DEF_CONST(bb, opcode, reg, value) \
+    { bb, opcode, value, 0u, 0, { }, 1, { reg } }
+#define DEF_CONST_WIDE(bb, opcode, reg, value) \
+    { bb, opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_CONST_STRING(bb, opcode, reg, index) \
+    { bb, opcode, index, 0u, 0, { }, 1, { reg } }
+#define DEF_IGET(bb, opcode, reg, obj, field_info) \
+    { bb, opcode, 0u, field_info, 1, { obj }, 1, { reg } }
+#define DEF_IGET_WIDE(bb, opcode, reg, obj, field_info) \
+    { bb, opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
+#define DEF_IPUT(bb, opcode, reg, obj, field_info) \
+    { bb, opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
+#define DEF_IPUT_WIDE(bb, opcode, reg, obj, field_info) \
+    { bb, opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
+#define DEF_SGET(bb, opcode, reg, field_info) \
+    { bb, opcode, 0u, field_info, 0, { }, 1, { reg } }
+#define DEF_SGET_WIDE(bb, opcode, reg, field_info) \
+    { bb, opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_SPUT(bb, opcode, reg, field_info) \
+    { bb, opcode, 0u, field_info, 1, { reg }, 0, { } }
+#define DEF_SPUT_WIDE(bb, opcode, reg, field_info) \
+    { bb, opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_AGET(bb, opcode, reg, obj, idx) \
+    { bb, opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
+#define DEF_AGET_WIDE(bb, opcode, reg, obj, idx) \
+    { bb, opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
+#define DEF_APUT(bb, opcode, reg, obj, idx) \
+    { bb, opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
+#define DEF_APUT_WIDE(bb, opcode, reg, obj, idx) \
+    { bb, opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
+#define DEF_INVOKE0(bb, opcode, method_idx) \
+    { bb, opcode, 0u, method_idx, 0, { }, 0, { } }
+#define DEF_INVOKE1(bb, opcode, reg, method_idx) \
+    { bb, opcode, 0u, method_idx, 1, { reg }, 0, { } }
+#define DEF_INVOKE2(bb, opcode, reg1, reg2, method_idx) \
+    { bb, opcode, 0u, method_idx, 2, { reg1, reg2 }, 0, { } }
+#define DEF_IFZ(bb, opcode, reg) \
+    { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
+#define DEF_MOVE(bb, opcode, reg, src) \
+    { bb, opcode, 0u, 0u, 1, { src }, 1, { reg } }
+#define DEF_MOVE_WIDE(bb, opcode, reg, src) \
+    { bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
+#define DEF_PHI2(bb, reg, src1, src2) \
+    { bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
+#define DEF_BINOP(bb, opcode, result, src1, src2) \
+    { bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
+#define DEF_UNOP(bb, opcode, result, src) DEF_MOVE(bb, opcode, result, src)
+#define DEF_NULOP(bb, opcode, result) DEF_CONST(bb, opcode, result, 0)
+#define DEF_NULOP_WIDE(bb, opcode, result) DEF_CONST_WIDE(bb, opcode, result, 0)
+#define DEF_CHECK_CAST(bb, opcode, reg, type) \
+    { bb, opcode, 0, type, 1, { reg }, 0, { } }
+#define DEF_NEW_ARRAY(bb, opcode, reg, length, type) \
+    { bb, opcode, 0, type, 1, { length }, 1, { reg } }
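+
+// For example, DEF_BINOP(3u, Instruction::ADD_INT, 2u, 0u, 1u) expands to
+// { 3u, Instruction::ADD_INT, 0u, 0u, 2, { 0u, 1u }, 1, { 2u } }, i.e. an add-int
+// in block 3 that uses sregs 0 and 1 and defines sreg 2.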
+
+  void AddTypes(const TypeDef* defs, size_t count) {
+    for (size_t i = 0; i != count; ++i) {
+      const TypeDef* def = &defs[i];
+      dex_file_builder_.AddType(def->descriptor);
+    }
+  }
+
+  template <size_t count>
+  void PrepareTypes(const TypeDef (&defs)[count]) {
+    type_defs_ = defs;
+    type_count_ = count;
+    AddTypes(defs, count);
+  }
+
+  void AddFields(const FieldDef* defs, size_t count) {
+    for (size_t i = 0; i != count; ++i) {
+      const FieldDef* def = &defs[i];
+      dex_file_builder_.AddField(def->class_descriptor, def->type, def->name);
+    }
+  }
+
+  template <size_t count>
+  void PrepareIFields(const FieldDef (&defs)[count]) {
+    ifield_defs_ = defs;
+    ifield_count_ = count;
+    AddFields(defs, count);
+  }
+
+  template <size_t count>
+  void PrepareSFields(const FieldDef (&defs)[count]) {
+    sfield_defs_ = defs;
+    sfield_count_ = count;
+    AddFields(defs, count);
+  }
+
+  void AddMethods(const MethodDef* defs, size_t count) {
+    for (size_t i = 0; i != count; ++i) {
+      const MethodDef* def = &defs[i];
+      dex_file_builder_.AddMethod(def->class_descriptor, def->signature, def->name);
+    }
+  }
+
+  template <size_t count>
+  void PrepareMethods(const MethodDef (&defs)[count]) {
+    method_defs_ = defs;
+    method_count_ = count;
+    AddMethods(defs, count);
+  }
+
+  DexMemAccessType AccessTypeForDescriptor(const char* descriptor) {
+    switch (descriptor[0]) {
+      case 'I':
+      case 'F':
+        return kDexMemAccessWord;
+      case 'J':
+      case 'D':
+        return kDexMemAccessWide;
+      case '[':
+      case 'L':
+        return kDexMemAccessObject;
+      case 'Z':
+        return kDexMemAccessBoolean;
+      case 'B':
+        return kDexMemAccessByte;
+      case 'C':
+        return kDexMemAccessChar;
+      case 'S':
+        return kDexMemAccessShort;
+      default:
+        LOG(FATAL) << "Bad descriptor: " << descriptor;
+        UNREACHABLE();
+    }
+  }
+
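+  // Counts the method's input VRs from its signature. For example,
+  // CountIns("(J[Ljava/lang/Object;I)V", /* is_static */ true) yields 4u: two for
+  // the long, one for the array reference, one for the int; a non-static method
+  // would add one more for the receiver.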
+  size_t CountIns(const std::string& test_method_signature, bool is_static) {
+    const char* sig = test_method_signature.c_str();
+    CHECK_EQ(sig[0], '(');
+    ++sig;
+    size_t result = is_static ? 0u : 1u;
+    while (*sig != ')') {
+      result += (AccessTypeForDescriptor(sig) == kDexMemAccessWide) ? 2u : 1u;
+      while (*sig == '[') {
+        ++sig;
+      }
+      if (*sig == 'L') {
+        do {
+          ++sig;
+          CHECK(*sig != '\0' && *sig != ')');
+        } while (*sig != ';');
+      }
+      ++sig;
+    }
+    return result;
+  }
+
+  void BuildDexFile(const std::string& test_method_signature, bool is_static) {
+    dex_file_builder_.AddMethod(kClassName, test_method_signature, kMethodName);
+    dex_file_ = dex_file_builder_.Build(kDexLocation);
+    cu_.dex_file = dex_file_.get();
+    cu_.method_idx = dex_file_builder_.GetMethodIdx(kClassName, test_method_signature, kMethodName);
+    cu_.access_flags = is_static ? kAccStatic : 0u;
+    cu_.mir_graph->m_units_.push_back(new (cu_.mir_graph->arena_) DexCompilationUnit(
+        &cu_, cu_.class_loader, cu_.class_linker, *cu_.dex_file, nullptr /* code_item not used */,
+        0u /* class_def_idx not used */, 0u /* method_index not used */,
+        cu_.access_flags, nullptr /* verified_method not used */));
+    cu_.mir_graph->current_method_ = 0u;
+    code_item_ = static_cast<DexFile::CodeItem*>(
+        cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
+
+    code_item_->ins_size_ = CountIns(test_method_signature, is_static);
+    code_item_->registers_size_ = kLocalVRs + code_item_->ins_size_;
+    cu_.mir_graph->current_code_item_ = code_item_;
+    cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
+
+    cu_.mir_graph->ifield_lowering_infos_.clear();
+    cu_.mir_graph->ifield_lowering_infos_.reserve(ifield_count_);
+    for (size_t i = 0u; i != ifield_count_; ++i) {
+      const FieldDef* def = &ifield_defs_[i];
+      uint32_t field_idx =
+          dex_file_builder_.GetFieldIdx(def->class_descriptor, def->type, def->name);
+      MirIFieldLoweringInfo field_info(field_idx, AccessTypeForDescriptor(def->type), false);
+      field_info.declaring_dex_file_ = cu_.dex_file;
+      field_info.declaring_field_idx_ = field_idx;
+      cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
+    }
+
+    cu_.mir_graph->sfield_lowering_infos_.clear();
+    cu_.mir_graph->sfield_lowering_infos_.reserve(sfield_count_);
+    for (size_t i = 0u; i != sfield_count_; ++i) {
+      const FieldDef* def = &sfield_defs_[i];
+      uint32_t field_idx =
+          dex_file_builder_.GetFieldIdx(def->class_descriptor, def->type, def->name);
+      MirSFieldLoweringInfo field_info(field_idx, AccessTypeForDescriptor(def->type));
+      field_info.declaring_dex_file_ = cu_.dex_file;
+      field_info.declaring_field_idx_ = field_idx;
+      cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
+    }
+
+    cu_.mir_graph->method_lowering_infos_.clear();
+    cu_.mir_graph->method_lowering_infos_.reserve(method_count_);
+    for (size_t i = 0u; i != method_count_; ++i) {
+      const MethodDef* def = &method_defs_[i];
+      uint32_t method_idx =
+          dex_file_builder_.GetMethodIdx(def->class_descriptor, def->signature, def->name);
+      MirMethodLoweringInfo method_info(method_idx, def->type, false);
+      method_info.declaring_dex_file_ = cu_.dex_file;
+      method_info.declaring_method_idx_ = method_idx;
+      cu_.mir_graph->method_lowering_infos_.push_back(method_info);
+    }
+  }
+
+  void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
+    cu_.mir_graph->block_id_map_.clear();
+    cu_.mir_graph->block_list_.clear();
+    ASSERT_LT(3u, count);  // null, entry, exit and at least one bytecode block.
+    ASSERT_EQ(kNullBlock, defs[0].type);
+    ASSERT_EQ(kEntryBlock, defs[1].type);
+    ASSERT_EQ(kExitBlock, defs[2].type);
+    for (size_t i = 0u; i != count; ++i) {
+      const BBDef* def = &defs[i];
+      BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
+      if (def->num_successors <= 2) {
+        bb->successor_block_list_type = kNotUsed;
+        bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
+        bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
+      } else {
+        bb->successor_block_list_type = kPackedSwitch;
+        bb->fall_through = 0u;
+        bb->taken = 0u;
+        bb->successor_blocks.reserve(def->num_successors);
+        for (size_t j = 0u; j != def->num_successors; ++j) {
+          SuccessorBlockInfo* successor_block_info =
+              static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
+                                                               kArenaAllocSuccessor));
+          successor_block_info->block = def->successors[j];
+          successor_block_info->key = 0u;  // Not used by type inference.
+          bb->successor_blocks.push_back(successor_block_info);
+        }
+      }
+      bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
+      if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
+        bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
+            cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
+        bb->data_flow_info->live_in_v = live_in_v_;
+      }
+    }
+    ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
+    cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
+    ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
+    cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
+    ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
+  }
+
+  template <size_t count>
+  void PrepareBasicBlocks(const BBDef (&defs)[count]) {
+    DoPrepareBasicBlocks(defs, count);
+  }
+
+  void PrepareSingleBlock() {
+    static const BBDef bbs[] = {
+        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(3)),
+        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(1)),
+    };
+    PrepareBasicBlocks(bbs);
+  }
+
+  void PrepareDiamond() {
+    static const BBDef bbs[] = {
+        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
+        DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),
+        DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
+        DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
+        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),
+    };
+    PrepareBasicBlocks(bbs);
+  }
+
+  void PrepareLoop() {
+    static const BBDef bbs[] = {
+        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
+        DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
+        DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)),  // "taken" loops to self.
+        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
+    };
+    PrepareBasicBlocks(bbs);
+  }
+
+  void DoPrepareMIRs(const MIRDef* defs, size_t count) {
+    mir_count_ = count;
+    mirs_ = cu_.arena.AllocArray<MIR>(count, kArenaAllocMIR);
+    ssa_reps_.resize(count);
+    for (size_t i = 0u; i != count; ++i) {
+      const MIRDef* def = &defs[i];
+      MIR* mir = &mirs_[i];
+      ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
+      BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
+      bb->AppendMIR(mir);
+      mir->dalvikInsn.opcode = def->opcode;
+      mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
+      mir->dalvikInsn.vB_wide = def->value;
+      if (IsInstructionIGetOrIPut(def->opcode)) {
+        ASSERT_LT(def->metadata, cu_.mir_graph->ifield_lowering_infos_.size());
+        mir->meta.ifield_lowering_info = def->metadata;
+        ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->metadata].MemAccessType(),
+                  IGetOrIPutMemAccessType(def->opcode));
+        cu_.mir_graph->merged_df_flags_ |= DF_IFIELD;
+      } else if (IsInstructionSGetOrSPut(def->opcode)) {
+        ASSERT_LT(def->metadata, cu_.mir_graph->sfield_lowering_infos_.size());
+        mir->meta.sfield_lowering_info = def->metadata;
+        ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->metadata].MemAccessType(),
+                  SGetOrSPutMemAccessType(def->opcode));
+        cu_.mir_graph->merged_df_flags_ |= DF_SFIELD;
+      } else if (IsInstructionInvoke(def->opcode)) {
+        ASSERT_LT(def->metadata, cu_.mir_graph->method_lowering_infos_.size());
+        mir->meta.method_lowering_info = def->metadata;
+        mir->dalvikInsn.vA = def->num_uses;
+        cu_.mir_graph->merged_df_flags_ |= DF_FORMAT_35C;
+      } else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
+        mir->meta.phi_incoming =
+            allocator_->AllocArray<BasicBlockId>(def->num_uses, kArenaAllocDFInfo);
+        ASSERT_EQ(def->num_uses, bb->predecessors.size());
+        std::copy(bb->predecessors.begin(), bb->predecessors.end(), mir->meta.phi_incoming);
+      } else if (def->opcode == Instruction::CHECK_CAST) {
+        ASSERT_LT(def->metadata, type_count_);
+        mir->dalvikInsn.vB = dex_file_builder_.GetTypeIdx(type_defs_[def->metadata].descriptor);
+        cu_.mir_graph->merged_df_flags_ |= DF_CHK_CAST;
+      } else if (def->opcode == Instruction::NEW_ARRAY) {
+        ASSERT_LT(def->metadata, type_count_);
+        mir->dalvikInsn.vC = dex_file_builder_.GetTypeIdx(type_defs_[def->metadata].descriptor);
+      }
+      mir->ssa_rep = &ssa_reps_[i];
+      mir->ssa_rep->num_uses = def->num_uses;
+      mir->ssa_rep->uses = const_cast<int32_t*>(def->uses);  // Not modified by type inference.
+      mir->ssa_rep->num_defs = def->num_defs;
+      mir->ssa_rep->defs = const_cast<int32_t*>(def->defs);  // Not modified by type inference.
+      mir->offset = i;  // Offset is used only for debug output.
+      mir->optimization_flags = 0u;
+    }
+    code_item_->insns_size_in_code_units_ = 2u * count;
+  }
+
+  template <size_t count>
+  void PrepareMIRs(const MIRDef (&defs)[count]) {
+    DoPrepareMIRs(defs, count);
+  }
+
+  // BasicBlockDataFlow::vreg_to_ssa_map_exit is used only for check-casts.
+  void AllocEndingVRegToSRegMaps() {
+    AllNodesIterator iterator(cu_.mir_graph.get());
+    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
+      if (bb->data_flow_info != nullptr) {
+        if (bb->data_flow_info->vreg_to_ssa_map_exit == nullptr) {
+          size_t num_vregs = code_item_->registers_size_;
+          bb->data_flow_info->vreg_to_ssa_map_exit = static_cast<int32_t*>(
+              cu_.arena.AllocArray<int32_t>(num_vregs, kArenaAllocDFInfo));
+          std::fill_n(bb->data_flow_info->vreg_to_ssa_map_exit, num_vregs, INVALID_SREG);
+        }
+      }
+    }
+  }
+
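+  // Records that |vreg| holds |sreg| at the exit of each listed block; used by
+  // check-cast tests. Illustrative use (names hypothetical):
+  //   static const BasicBlockId v0_blocks[] = { 3u, 4u, 5u };
+  //   MapVRegToSReg(2, 2, v0_blocks);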
+  template <size_t count>
+  void MapVRegToSReg(int vreg, int32_t sreg, const BasicBlockId (&bb_ids)[count]) {
+    AllocEndingVRegToSRegMaps();
+    for (BasicBlockId bb_id : bb_ids) {
+      BasicBlock* bb = cu_.mir_graph->GetBasicBlock(bb_id);
+      CHECK(bb != nullptr);
+      CHECK(bb->data_flow_info != nullptr);
+      CHECK(bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+      bb->data_flow_info->vreg_to_ssa_map_exit[vreg] = sreg;
+    }
+  }
+
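+  // Runs the pass to a fixed point: Apply() is repeated over the blocks in
+  // pre-order until a full pass reports no change, then Finish() commits the
+  // inferred types.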
+  void PerformTypeInference() {
+    cu_.mir_graph->SSATransformationStart();
+    cu_.mir_graph->ComputeDFSOrders();
+    cu_.mir_graph->ComputeDominators();
+    cu_.mir_graph->ComputeTopologicalSortOrder();
+    cu_.mir_graph->SSATransformationEnd();
+    ASSERT_TRUE(type_inference_ == nullptr);
+    type_inference_.reset(new (allocator_.get()) TypeInference(cu_.mir_graph.get(),
+                                                               allocator_.get()));
+    RepeatingPreOrderDfsIterator iter(cu_.mir_graph.get());
+    bool changed = false;
+    for (BasicBlock* bb = iter.Next(changed); bb != nullptr; bb = iter.Next(changed)) {
+      changed = type_inference_->Apply(bb);
+    }
+    type_inference_->Finish();
+  }
+
+  TypeInferenceTest()
+      : pool_(),
+        cu_(&pool_, kRuntimeISA, nullptr, nullptr),
+        mir_count_(0u),
+        mirs_(nullptr),
+        code_item_(nullptr),
+        ssa_reps_(),
+        allocator_(),
+        live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)),
+        type_defs_(nullptr),
+        type_count_(0u),
+        ifield_defs_(nullptr),
+        ifield_count_(0u),
+        sfield_defs_(nullptr),
+        sfield_count_(0u),
+        method_defs_(nullptr),
+        method_count_(0u),
+        dex_file_builder_(),
+        dex_file_(nullptr) {
+    cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+    allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+    // Bind all possible sregs to live vregs for test purposes.
+    live_in_v_->SetInitialBits(kMaxSsaRegs);
+    cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
+        kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
+    cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
+    cu_.mir_graph->ssa_subscripts_.reserve(kMaxSsaRegs);
+    for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
+      cu_.mir_graph->ssa_base_vregs_.push_back(i);
+      cu_.mir_graph->ssa_subscripts_.push_back(0);
+    }
+  }
+
+  enum ExpectFlags : uint32_t {
+    kExpectWide         = 0x0001u,
+    kExpectNarrow       = 0x0002u,
+    kExpectFp           = 0x0004u,
+    kExpectCore         = 0x0008u,
+    kExpectRef          = 0x0010u,
+    kExpectArrayWide    = 0x0020u,
+    kExpectArrayNarrow  = 0x0040u,
+    kExpectArrayFp      = 0x0080u,
+    kExpectArrayCore    = 0x0100u,
+    kExpectArrayRef     = 0x0200u,
+    kExpectNull         = 0x0400u,
+    kExpectHigh         = 0x0800u,  // Reserved for ExpectSRegType().
+  };
+
+  struct SRegExpectation {
+    uint32_t array_depth;
+    uint32_t flags;
+  };
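+  // For example, { 1u, kExpectRef | kExpectNarrow | kExpectArrayWide | kExpectArrayFp }
+  // describes a double[]: a narrow reference of array depth 1 whose element type
+  // is wide fp.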
+
+  void ExpectSRegType(int s_reg, const SRegExpectation& expectation, bool check_loc = true) {
+    uint32_t flags = expectation.flags;
+    uint32_t array_depth = expectation.array_depth;
+    TypeInference::Type type = type_inference_->sregs_[s_reg];
+
+    if (check_loc) {
+      RegLocation loc = cu_.mir_graph->reg_location_[s_reg];
+      EXPECT_EQ((flags & kExpectWide) != 0u, loc.wide) << s_reg;
+      EXPECT_EQ((flags & kExpectFp) != 0u, loc.fp) << s_reg;
+      EXPECT_EQ((flags & kExpectCore) != 0u, loc.core) << s_reg;
+      EXPECT_EQ((flags & kExpectRef) != 0u, loc.ref) << s_reg;
+      EXPECT_EQ((flags & kExpectHigh) != 0u, loc.high_word) << s_reg;
+    }
+
+    EXPECT_EQ((flags & kExpectWide) != 0u, type.Wide()) << s_reg;
+    EXPECT_EQ((flags & kExpectNarrow) != 0u, type.Narrow()) << s_reg;
+    EXPECT_EQ((flags & kExpectFp) != 0u, type.Fp()) << s_reg;
+    EXPECT_EQ((flags & kExpectCore) != 0u, type.Core()) << s_reg;
+    EXPECT_EQ((flags & kExpectRef) != 0u, type.Ref()) << s_reg;
+    EXPECT_EQ((flags & kExpectHigh) == 0u, type.LowWord()) << s_reg;
+    EXPECT_EQ((flags & kExpectHigh) != 0u, type.HighWord()) << s_reg;
+
+    if ((flags & kExpectRef) != 0u) {
+      EXPECT_EQ((flags & kExpectNull) != 0u, !type.NonNull()) << s_reg;
+    } else {
+      // Null should be checked only for references.
+      ASSERT_EQ((flags & kExpectNull), 0u);
+    }
+
+    ASSERT_EQ(array_depth, type.ArrayDepth()) << s_reg;
+    if (array_depth != 0u) {
+      ASSERT_NE((flags & kExpectRef), 0u);
+      TypeInference::Type nested_type = type.NestedType();
+      EXPECT_EQ((flags & kExpectArrayWide) != 0u, nested_type.Wide()) << s_reg;
+      EXPECT_EQ((flags & kExpectArrayNarrow) != 0u, nested_type.Narrow()) << s_reg;
+      EXPECT_EQ((flags & kExpectArrayFp) != 0u, nested_type.Fp()) << s_reg;
+      EXPECT_EQ((flags & kExpectArrayCore) != 0u, nested_type.Core()) << s_reg;
+      EXPECT_EQ((flags & kExpectArrayRef) != 0u, nested_type.Ref()) << s_reg;
+    }
+    if (!type.Narrow() && type.LowWord() &&
+        (expectation.flags & (kExpectWide | kExpectNarrow | kExpectHigh)) == kExpectWide) {
+      SRegExpectation high_expectation = { array_depth, flags | kExpectHigh };
+      ExpectSRegType(s_reg + 1, high_expectation);
+    }
+  }
+
+  void ExpectCore(int s_reg, bool core) {
+    EXPECT_EQ(core, type_inference_->sregs_[s_reg].Core());
+  }
+
+  void ExpectRef(int s_reg, bool ref) {
+    EXPECT_EQ(ref, type_inference_->sregs_[s_reg].Ref());
+  }
+
+  void ExpectArrayDepth(int s_reg, uint32_t array_depth) {
+    EXPECT_EQ(array_depth, type_inference_->sregs_[s_reg].ArrayDepth());
+  }
+
+  static constexpr size_t kMaxSsaRegs = 16384u;
+  static constexpr uint16_t kLocalVRs = 1000u;
+
+  static constexpr const char* kDexLocation = "TypeInferenceDexFile;";
+  static constexpr const char* kClassName = "LTypeInferenceTest;";
+  static constexpr const char* kMethodName = "test";
+
+  ArenaPool pool_;
+  CompilationUnit cu_;
+  size_t mir_count_;
+  MIR* mirs_;
+  DexFile::CodeItem* code_item_;
+  std::vector<SSARepresentation> ssa_reps_;
+  std::unique_ptr<ScopedArenaAllocator> allocator_;
+  std::unique_ptr<TypeInference> type_inference_;
+  ArenaBitVector* live_in_v_;
+
+  const TypeDef* type_defs_;
+  size_t type_count_;
+  const FieldDef* ifield_defs_;
+  size_t ifield_count_;
+  const FieldDef* sfield_defs_;
+  size_t sfield_count_;
+  const MethodDef* method_defs_;
+  size_t method_count_;
+
+  TestDexFileBuilder dex_file_builder_;
+  std::unique_ptr<const DexFile> dex_file_;
+};
+
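+// Each test below builds a minimal dex file and MIR graph from its def tables,
+// runs type inference to a fixed point and checks the inferred type flags of the
+// relevant sregs against the listed expectations.
+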
+TEST_F(TypeInferenceTest, IGet) {
+  static const FieldDef ifields[] = {
+      { kClassName, "B", "byteField" },
+      { kClassName, "C", "charField" },
+      { kClassName, "D", "doubleField" },
+      { kClassName, "F", "floatField" },
+      { kClassName, "I", "intField" },
+      { kClassName, "J", "longField" },
+      { kClassName, "S", "shortField" },
+      { kClassName, "Z", "booleanField" },
+      { kClassName, "Ljava/lang/Object;", "objectField" },
+      { kClassName, "[Ljava/lang/Object;", "objectArrayField" },
+  };
+  constexpr uint32_t thiz = kLocalVRs;
+  static const MIRDef mirs[] = {
+      DEF_IGET(3u, Instruction::IGET_BYTE, 0u, thiz, 0u),
+      DEF_IGET(3u, Instruction::IGET_CHAR, 1u, thiz, 1u),
+      DEF_IGET_WIDE(3u, Instruction::IGET_WIDE, 2u, thiz, 2u),
+      DEF_IGET(3u, Instruction::IGET, 4u, thiz, 3u),
+      DEF_IGET(3u, Instruction::IGET, 5u, thiz, 4u),
+      DEF_IGET_WIDE(3u, Instruction::IGET_WIDE, 6u, thiz, 5u),
+      DEF_IGET(3u, Instruction::IGET_SHORT, 8u, thiz, 6u),
+      DEF_IGET(3u, Instruction::IGET_BOOLEAN, 9u, thiz, 7u),
+      DEF_IGET(3u, Instruction::IGET_OBJECT, 10u, thiz, 8u),
+      DEF_IGET(3u, Instruction::IGET_OBJECT, 11u, thiz, 9u),
+  };
+
+  PrepareIFields(ifields);
+  BuildDexFile("()V", false);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectFp | kExpectWide },
+      { 0u, kExpectFp | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectWide },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+  };
+  static_assert(arraysize(expectations) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(expectations); ++i) {
+    EXPECT_EQ(mirs[i].opcode, mirs_[i].dalvikInsn.opcode);
+    ASSERT_LE(1u, mirs_[i].ssa_rep->num_defs);
+    ExpectSRegType(mirs_[i].ssa_rep->defs[0], expectations[i]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SGet) {
+  static const FieldDef sfields[] = {
+      { kClassName, "B", "staticByteField" },
+      { kClassName, "C", "staticCharField" },
+      { kClassName, "D", "staticDoubleField" },
+      { kClassName, "F", "staticFloatField" },
+      { kClassName, "I", "staticIntField" },
+      { kClassName, "J", "staticLongField" },
+      { kClassName, "S", "staticShortField" },
+      { kClassName, "Z", "staticBooleanField" },
+      { kClassName, "Ljava/lang/Object;", "staticObjectField" },
+      { kClassName, "[Ljava/lang/Object;", "staticObjectArrayField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_SGET(3u, Instruction::SGET_BYTE, 0u, 0u),
+      DEF_SGET(3u, Instruction::SGET_CHAR, 1u, 1u),
+      DEF_SGET_WIDE(3u, Instruction::SGET_WIDE, 2u, 2u),
+      DEF_SGET(3u, Instruction::SGET, 4u, 3u),
+      DEF_SGET(3u, Instruction::SGET, 5u, 4u),
+      DEF_SGET_WIDE(3u, Instruction::SGET_WIDE, 6u, 5u),
+      DEF_SGET(3u, Instruction::SGET_SHORT, 8u, 6u),
+      DEF_SGET(3u, Instruction::SGET_BOOLEAN, 9u, 7u),
+      DEF_SGET(3u, Instruction::SGET_OBJECT, 10u, 8u),
+      DEF_SGET(3u, Instruction::SGET_OBJECT, 11u, 9u),
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectFp | kExpectWide },
+      { 0u, kExpectFp | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectWide },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+  };
+  static_assert(arraysize(expectations) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(expectations); ++i) {
+    EXPECT_EQ(mirs[i].opcode, mirs_[i].dalvikInsn.opcode);
+    ASSERT_LE(1u, mirs_[i].ssa_rep->num_defs);
+    ExpectSRegType(mirs_[i].ssa_rep->defs[0], expectations[i]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, IPut) {
+  static const FieldDef ifields[] = {
+      { kClassName, "B", "byteField" },
+      { kClassName, "C", "charField" },
+      { kClassName, "D", "doubleField" },
+      { kClassName, "F", "floatField" },
+      { kClassName, "I", "intField" },
+      { kClassName, "J", "longField" },
+      { kClassName, "S", "shortField" },
+      { kClassName, "Z", "booleanField" },
+      { kClassName, "Ljava/lang/Object;", "objectField" },
+      { kClassName, "[Ljava/lang/Object;", "objectArrayField" },
+  };
+  constexpr uint32_t thiz = kLocalVRs;
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_IPUT(3u, Instruction::IPUT_BYTE, 0u, thiz, 0u),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_IPUT(3u, Instruction::IPUT_CHAR, 1u, thiz, 1u),
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
+      DEF_IPUT_WIDE(3u, Instruction::IPUT_WIDE, 2u, thiz, 2u),
+      DEF_CONST(3u, Instruction::CONST, 4u, 0),
+      DEF_IPUT(3u, Instruction::IPUT, 4u, thiz, 3u),
+      DEF_CONST(3u, Instruction::CONST, 5u, 0),
+      DEF_IPUT(3u, Instruction::IPUT, 5u, thiz, 4u),
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+      DEF_IPUT_WIDE(3u, Instruction::IPUT_WIDE, 6u, thiz, 5u),
+      DEF_CONST(3u, Instruction::CONST, 8u, 0),
+      DEF_IPUT(3u, Instruction::IPUT_SHORT, 8u, thiz, 6u),
+      DEF_CONST(3u, Instruction::CONST, 9u, 0),
+      DEF_IPUT(3u, Instruction::IPUT_BOOLEAN, 9u, thiz, 7u),
+      DEF_CONST(3u, Instruction::CONST, 10u, 0),
+      DEF_IPUT(3u, Instruction::IPUT_OBJECT, 10u, thiz, 8u),
+      DEF_CONST(3u, Instruction::CONST, 11u, 0),
+      DEF_IPUT(3u, Instruction::IPUT_OBJECT, 11u, thiz, 9u),
+  };
+
+  PrepareIFields(ifields);
+  BuildDexFile("()V", false);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      // One expectation for every 2 MIRs.
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectFp | kExpectWide },
+      { 0u, kExpectFp | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectWide },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+  };
+  static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(expectations); ++i) {
+    EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+    EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+    ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+    ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SPut) {
+  static const FieldDef sfields[] = {
+      { kClassName, "B", "staticByteField" },
+      { kClassName, "C", "staticCharField" },
+      { kClassName, "D", "staticDoubleField" },
+      { kClassName, "F", "staticFloatField" },
+      { kClassName, "I", "staticIntField" },
+      { kClassName, "J", "staticLongField" },
+      { kClassName, "S", "staticShortField" },
+      { kClassName, "Z", "staticBooleanField" },
+      { kClassName, "Ljava/lang/Object;", "staticObjectField" },
+      { kClassName, "[Ljava/lang/Object;", "staticObjectArrayField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_BYTE, 0u, 0u),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_CHAR, 1u, 1u),
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
+      DEF_SPUT_WIDE(3u, Instruction::SPUT_WIDE, 2u, 2u),
+      DEF_CONST(3u, Instruction::CONST, 4u, 0),
+      DEF_SPUT(3u, Instruction::SPUT, 4u, 3u),
+      DEF_CONST(3u, Instruction::CONST, 5u, 0),
+      DEF_SPUT(3u, Instruction::SPUT, 5u, 4u),
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+      DEF_SPUT_WIDE(3u, Instruction::SPUT_WIDE, 6u, 5u),
+      DEF_CONST(3u, Instruction::CONST, 8u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_SHORT, 8u, 6u),
+      DEF_CONST(3u, Instruction::CONST, 9u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_BOOLEAN, 9u, 7u),
+      DEF_CONST(3u, Instruction::CONST, 10u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 10u, 8u),
+      DEF_CONST(3u, Instruction::CONST, 11u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 11u, 9u),
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      // One expectation for every 2 MIRs.
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectFp | kExpectWide },
+      { 0u, kExpectFp | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectWide },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+  };
+  static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(expectations); ++i) {
+    EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+    EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+    ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+    ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MethodReturnType) {
+  static const MethodDef methods[] = {
+      { kClassName, "()B", "byteFoo", kStatic },
+      { kClassName, "()C", "charFoo", kStatic },
+      { kClassName, "()D", "doubleFoo", kStatic },
+      { kClassName, "()F", "floatFoo", kStatic },
+      { kClassName, "()I", "intFoo", kStatic },
+      { kClassName, "()J", "longFoo", kStatic },
+      { kClassName, "()S", "shortFoo", kStatic },
+      { kClassName, "()Z", "booleanFoo", kStatic },
+      { kClassName, "()Ljava/lang/Object;", "objectFoo", kStatic },
+      { kClassName, "()[Ljava/lang/Object;", "objectArrayFoo", kStatic },
+  };
+  static const MIRDef mirs[] = {
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 0u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT, 0u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 1u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT, 1u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 2u),
+      DEF_NULOP_WIDE(3u, Instruction::MOVE_RESULT_WIDE, 2u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 3u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT, 4u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 4u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT, 5u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 5u),
+      DEF_NULOP_WIDE(3u, Instruction::MOVE_RESULT_WIDE, 6u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 6u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT, 8u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 7u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT, 9u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 8u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT_OBJECT, 10u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 9u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT_OBJECT, 11u),
+  };
+
+  PrepareMethods(methods);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      // One expectation for every 2 MIRs.
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectFp | kExpectWide },
+      { 0u, kExpectFp | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectWide },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+  };
+  static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(expectations); ++i) {
+    EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+    EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+    ASSERT_LE(1u, mirs_[2 * i + 1].ssa_rep->num_defs);
+    ExpectSRegType(mirs_[2 * i + 1].ssa_rep->defs[0], expectations[i]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MethodArgType) {
+  static const MethodDef methods[] = {
+      { kClassName, "(B)V", "fooByte", kStatic },
+      { kClassName, "(C)V", "fooChar", kStatic },
+      { kClassName, "(D)V", "fooDouble", kStatic },
+      { kClassName, "(F)V", "fooFloat", kStatic },
+      { kClassName, "(I)V", "fooInt", kStatic },
+      { kClassName, "(J)V", "fooLong", kStatic },
+      { kClassName, "(S)V", "fooShort", kStatic },
+      { kClassName, "(Z)V", "fooBoolean", kStatic },
+      { kClassName, "(Ljava/lang/Object;)V", "fooObject", kStatic },
+      { kClassName, "([Ljava/lang/Object;)V", "fooObjectArray", kStatic },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 0u, 0u),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 1u, 1u),
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
+      DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 2u, 3u, 2u),
+      DEF_CONST(3u, Instruction::CONST, 4u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 4u, 3u),
+      DEF_CONST(3u, Instruction::CONST, 5u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 5u, 4u),
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+      DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 6u, 7u, 5u),
+      DEF_CONST(3u, Instruction::CONST, 8u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 8u, 6u),
+      DEF_CONST(3u, Instruction::CONST, 9u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 9u, 7u),
+      DEF_CONST(3u, Instruction::CONST, 10u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 10u, 8u),
+      DEF_CONST(3u, Instruction::CONST, 11u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 11u, 9u),
+  };
+
+  PrepareMethods(methods);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      // One expectation for every 2 MIRs.
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectFp | kExpectWide },
+      { 0u, kExpectFp | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectWide },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+  };
+  static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(expectations); ++i) {
+    EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+    EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+    ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+    ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut1) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),  // Object[] array
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // value; can't even determine whether core or fp.
+      DEF_CONST(3u, Instruction::CONST, 2u, 0),  // index
+      DEF_APUT(3u, Instruction::APUT, 1u, 0u, 2u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayNarrow },
+      { 0u, kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut2) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),  // Object[] array
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // Object[] value
+      DEF_CONST(3u, Instruction::CONST, 2u, 0),  // index
+      DEF_APUT(3u, Instruction::APUT_OBJECT, 1u, 0u, 2u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut3) {
+  static const MIRDef mirs[] = {
+      // Either array1 or array2 could be Object[][] but there is no way to tell from the bytecode.
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),  // Object[] array1
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // Object[] array2
+      DEF_CONST(3u, Instruction::CONST, 2u, 0),  // index
+      DEF_APUT(3u, Instruction::APUT_OBJECT, 0u, 1u, 2u),
+      DEF_APUT(3u, Instruction::APUT_OBJECT, 1u, 0u, 2u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut4) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // index
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // Object[] array
+      DEF_CONST(3u, Instruction::CONST, 3u, 0),  // value; can't even determine whether core or fp.
+      DEF_APUT(3u, Instruction::APUT, 3u, 2u, 1u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayNarrow },
+      { 0u, kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut5) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // index
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // Object[] array
+      DEF_CONST(3u, Instruction::CONST, 3u, 0),  // Object[] value
+      DEF_APUT(3u, Instruction::APUT_OBJECT, 3u, 2u, 1u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut6) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // index
+      // Either array1 or array2 could be Object[][] but there is no way to tell from the bytecode.
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // Object[] array1
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 3u, 0u, 1u),  // Object[] array2
+      DEF_APUT(3u, Instruction::APUT_OBJECT, 2u, 3u, 1u),
+      DEF_APUT(3u, Instruction::APUT_OBJECT, 3u, 2u, 1u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, TwoNullObjectArraysInLoop) {
+  static const MIRDef mirs[] = {
+      // void foo() {
+      //   Object[] array1 = ((Object[])null)[0];
+      //   Object[] array2 = ((Object[])null)[0];
+      //   for (int i = 0; i != 3; ++i) {
+      //     Object[] a1 = null;  // One of these could be Object[][] but not both.
+      //     Object[] a2 = null;  // But they will be deduced as Object[].
+      //     try { a1[0] = a2; } catch (Throwable ignored) { }
+      //     try { a2[0] = a1; } catch (Throwable ignored) { }
+      //     array1 = a1;
+      //     array2 = a2;
+      //   }
+      // }
+      //
+      // Omitting the try-catch:
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),            // null
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),            // index
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // array1
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 3u, 0u, 1u),  // array2
+      DEF_PHI2(4u, 4u, 2u, 8u),  // ? + [L -> [? gives [L (see array-length below)
+      DEF_PHI2(4u, 5u, 3u, 9u),  // ? + [L -> ? gives ?
+      DEF_AGET(4u, Instruction::AGET_OBJECT, 6u, 0u, 1u),  // a1
+      DEF_AGET(4u, Instruction::AGET_OBJECT, 7u, 0u, 1u),  // a2
+      DEF_APUT(4u, Instruction::APUT_OBJECT, 6u, 7u, 1u),
+      DEF_APUT(4u, Instruction::APUT_OBJECT, 7u, 6u, 1u),
+      DEF_MOVE(4u, Instruction::MOVE_OBJECT, 8u, 6u),
+      DEF_MOVE(4u, Instruction::MOVE_OBJECT, 9u, 7u),
+      DEF_UNOP(5u, Instruction::ARRAY_LENGTH, 10u, 4u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareLoop();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArrayArrayFloat) {
+  static const MethodDef methods[] = {
+      { kClassName, "(F)V", "fooFloat", kStatic },
+  };
+  static const MIRDef mirs[] = {
+      // void foo() {
+      //   try {
+      //     float[][][] aaaf = null;
+      //     float[][] array = aaaf[0];  // Make sure array is treated as properly typed.
+      //     array[0][0] = 0.0f;      // const + aget-object[1] + aput
+      //     fooFloat(array[0][0]);   // aget-object[2] + aget + invoke
+      //     // invoke: signature => input is F.
+      //     // aget: output is F => base is [F (precise)
+      //     // aget-object[2]: output is [F => base is [[F (precise)
+      //     // aput: unknown input type => base is [?
+      //     // aget-object[1]: base is [[F => result is L or [F, merge with [? => result is [F
+      //     // aput (again): base is [F => result is F
+      //     // const: F determined by the aput reprocessing.
+      //   } catch (Throwable ignored) {
+      //   }
+      // }
+      //
+      // Omitting the try-catch:
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),             // 0
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),             // aaaf
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 1u, 0u),   // array = aaaf[0]
+      DEF_CONST(3u, Instruction::CONST, 3u, 0),             // 0.0f
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 4u, 2u, 0u),   // array[0]
+      DEF_APUT(3u, Instruction::APUT, 3u, 4u, 0u),          // array[0][0] = 0.0f
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 5u, 2u, 0u),   // array[0]
+      DEF_AGET(3u, Instruction::AGET, 6u, 5u, 0u),          // array[0][0]
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 6u, 0u),  // fooFloat(array[0][0])
+  };
+
+  PrepareMethods(methods);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 2u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectFp | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectFp | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
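The commented walkthrough in ArrayArrayFloat corresponds to roughly this
self-contained Java sketch (class name illustrative):

    class ArrayArrayFloatSketch {
      static void fooFloat(float f) { }

      static void foo() {
        try {
          float[][][] aaaf = null;
          float[][] array = aaaf[0];
          array[0][0] = 0.0f;       // const + aget-object + aput
          fooFloat(array[0][0]);    // aget-object + aget + invoke-static
        } catch (Throwable ignored) {
        }
      }
    }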
+TEST_F(TypeInferenceTest, CheckCast1) {
+  static const TypeDef types[] = {
+      { "[I" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 0u),
+      // Pseudo-phi from [I and [I into L infers only L but not [.
+      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+  MapVRegToSReg(2, 2, v0_def_blocks);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
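CheckCast1 in approximate Java form (illustrative only; the diamond is built by
PrepareDiamond() and dx would not emit this exact MIR sequence):

    class CheckCast1Sketch {
      static void foo(boolean flag) {
        Object o = ((Object[]) null)[0];
        if (flag) {
          int[] a = (int[]) o;   // check-cast [I on one path
        } else {
          int[] b = (int[]) o;   // check-cast [I on the other path
        }
        Object merged = o;       // the merge keeps only L; [I is not propagated
      }
    }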
+TEST_F(TypeInferenceTest, CheckCast2) {
+  static const TypeDef types[] = {
+      { "[I" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 0u),
+      // Pseudo-phi from [I and [I into [? infers [I.
+      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 2u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+  MapVRegToSReg(2, 2, v0_def_blocks);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCast3) {
+  static const TypeDef types[] = {
+      { "[I" },
+      { "[F" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+      // Pseudo-phi from [I and [F into L correctly leaves it as L.
+      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+  MapVRegToSReg(2, 2, v0_def_blocks);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCastConflict1) {
+  static const TypeDef types[] = {
+      { "[I" },
+      { "[F" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+      // Pseudo-phi from [I and [F into [? infers conflict [I/[F.
+      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 2u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+  MapVRegToSReg(2, 2, v0_def_blocks);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg], false);
+  }
+  // The type conflict in the array element type wasn't propagated to an SSA reg.
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCastConflict2) {
+  static const TypeDef types[] = {
+      { "[I" },
+      { "[F" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+      // Pseudo-phi from [I and [F into [? infers conflict [I/[F.
+      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+      DEF_AGET(6u, Instruction::AGET, 4u, 2u, 1u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+  MapVRegToSReg(2, 2, v0_def_blocks);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectFp | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg], false);
+  }
+  // Type conflict in an SSA reg, register promotion disabled.
+  EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
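An approximate source-level picture of CheckCastConflict2 (sketch only; the
trailing plain aget has no Java equivalent, since javac would insert a cast):

    class CheckCastConflictSketch {
      static void foo(boolean flag) {
        Object o = ((Object[]) null)[0];
        if (flag) {
          int[] a = (int[]) o;      // check-cast [I
        } else {
          float[] b = (float[]) o;  // check-cast [F
        }
        // The MIR then reads o[0] with a plain aget, forcing the element type
        // (int vs. float) into a single SSA reg: a conflict, so promotion is
        // disabled. CheckCastConflict1 only takes array-length, so the
        // conflict never reaches an SSA reg.
      }
    }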
+TEST_F(TypeInferenceTest, Phi1) {
+  static const TypeDef types[] = {
+      { "[I" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 100),
+      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 0u),
+      // Phi from [I and [I infers only L but not [.
+      DEF_PHI2(6u, 3u, 1u, 2u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
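Phi1 in sketch form (illustrative): both phi inputs are int[], but nothing after
the merge uses the value as an array, so only the plain reference type survives.
Phi2 below adds an array-length use, which pulls [F through the phi.

    class Phi1Sketch {
      static Object foo(boolean flag) {
        Object result = flag ? new int[100] : new int[100];
        return result;  // no array use after the phi => inferred as L only
      }
    }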
+TEST_F(TypeInferenceTest, Phi2) {
+  static const TypeDef types[] = {
+      { "[F" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 100),
+      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 0u),
+      // Phi from [F and [F into [? infers [F.
+      DEF_PHI2(6u, 3u, 1u, 2u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 3u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi3) {
+  static const TypeDef types[] = {
+      { "[I" },
+      { "[F" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 100),
+      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+      // Phi from [I and [F infers L.
+      DEF_PHI2(6u, 3u, 1u, 2u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi4) {
+  static const TypeDef types[] = {
+      { "[I" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 100),
+      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+      DEF_CONST(5u, Instruction::CONST, 2u, 0),
+      // Pseudo-phi from [I and null infers L.
+      DEF_PHI2(6u, 3u, 1u, 2u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+      { 0u, kExpectRef | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, PhiConflict1) {
+  static const TypeDef types[] = {
+      { "[I" },
+      { "[F" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 100),
+      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+      // Pseudo-phi from [I and [F into [? infers conflict [I/[F (then propagated upwards).
+      DEF_PHI2(6u, 3u, 1u, 2u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 3u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg], false);
+  }
+  // The type conflict in the array element type wasn't propagated to an SSA reg.
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, PhiConflict2) {
+  static const TypeDef types[] = {
+      { "[I" },
+      { "[F" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 100),
+      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+      // Pseudo-phi from [I and [F into [? infers conflict [I/[F (then propagated upwards).
+      DEF_PHI2(6u, 3u, 1u, 2u),
+      DEF_AGET(6u, Instruction::AGET, 4u, 3u, 0u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectFp | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg], false);
+  }
+  // Type conflict in an SSA reg, register promotion disabled.
+  EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
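PhiConflict2 in rough Java form (the plain aget after the phi cannot be written
in Java without a cast; the comment marks where the MIR diverges):

    class PhiConflict2Sketch {
      static void foo(boolean flag) {
        Object array = flag ? new int[100] : new float[100];
        // The MIR then does a plain aget on array: the element could be int
        // or float, so the result SSA reg carries a core/fp conflict and
        // promotion is disabled (PhiConflict1 only takes array-length, which
        // keeps the conflict out of any SSA reg).
      }
    }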
+TEST_F(TypeInferenceTest, Wide1) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // index
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // long[]
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 3u, 0),  // long
+      DEF_APUT_WIDE(3u, Instruction::APUT_WIDE, 3u, 2u, 1u),
+      { 3u, Instruction::RETURN_OBJECT, 0, 0u, 1u, { 2u }, 0u, { } },
+  };
+
+  BuildDexFile("()[J", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+      { 0u, kExpectCore | kExpectWide },
+      // NOTE: High word checked implicitly for sreg = 3.
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg], false);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, WideSizeConflict1) {
+  static const MIRDef mirs[] = {
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 0u, 0),
+      DEF_MOVE(3u, Instruction::MOVE, 2u, 0u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectNarrow | kExpectWide },
+      { 0u, kExpectNarrow | kExpectWide },
+  };
+  ExpectSRegType(0u, expectations[0], false);
+  ExpectSRegType(2u, expectations[1], false);
+  EXPECT_TRUE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArrayLongLength) {
+  static const FieldDef sfields[] = {
+      { kClassName, "[J", "arrayLongField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(4u, Instruction::CONST, 0u, 0),
+      DEF_SGET(5u, Instruction::SGET_OBJECT, 1u, 0u),
+      DEF_PHI2(6u, 2u, 0u, 1u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 3u, 2u),
+      DEF_SGET(6u, Instruction::SGET_OBJECT, 4u, 0u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 5u, 4u),
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayCore | kExpectArrayWide },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
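ArrayLongLength corresponds roughly to (field and method names illustrative):

    class ArrayLongLengthSketch {
      static long[] arrayLongField;

      static int foo(boolean flag) {
        long[] a = flag ? null : arrayLongField;    // phi of null and [J
        return a.length + arrayLongField.length;    // both lengths see [J
      }
    }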
+TEST_F(TypeInferenceTest, ArrayArrayObjectLength) {
+  static const FieldDef sfields[] = {
+      { kClassName, "[[Ljava/lang/Object;", "arrayLongField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(4u, Instruction::CONST, 0u, 0),
+      DEF_SGET(5u, Instruction::SGET_OBJECT, 1u, 0u),
+      DEF_PHI2(6u, 2u, 0u, 1u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 3u, 2u),
+      DEF_SGET(6u, Instruction::SGET_OBJECT, 4u, 0u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 5u, 4u),
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 2u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 2u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SGetAdd0SPut) {
+  static const FieldDef sfields[] = {
+      { kClassName, "I", "staticIntField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_SGET(3u, Instruction::SGET, 0u, 0u),
+      DEF_UNOP(3u, Instruction::ADD_INT_LIT8, 1u, 0u),  // +0
+      DEF_SPUT(3u, Instruction::SPUT, 1u, 0u),
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MoveObjectNull) {
+  static const MethodDef methods[] = {
+      { kClassName, "([I[D)V", "foo", kStatic },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_MOVE(3u, Instruction::MOVE_OBJECT, 1u, 0u),
+      DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 0u, 1u, 0u),
+  };
+
+  PrepareMethods(methods);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectation = {
+      1u,
+      kExpectRef | kExpectNarrow | kExpectNull |
+      kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+  };
+  ExpectSRegType(0u, expectation);
+  ExpectSRegType(1u, expectation);
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
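MoveObjectNull in sketch form (illustrative; whether dx reuses one null constant
or duplicates it depends on its register allocation). MoveNull1 below swaps the
move-object for a plain move, a pattern javac/dx never emit for a reference, so
that variant exists only at the MIR level:

    class MoveObjectNullSketch {
      static void foo(int[] ints, double[] doubles) { }

      static void bar() {
        // One null value flows into both argument positions; the invoke's
        // signature stamps [I and [D expectations onto the same constant.
        foo(null, null);
      }
    }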
+TEST_F(TypeInferenceTest, MoveNull1) {
+  static const MethodDef methods[] = {
+      { kClassName, "([I[D)V", "foo", kStatic },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_MOVE(3u, Instruction::MOVE, 1u, 0u),
+      DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 0u, 1u, 0u),
+  };
+
+  PrepareMethods(methods);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectation = {
+      1u,
+      kExpectCore | kExpectRef | kExpectFp | kExpectNarrow | kExpectNull |
+      kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+  };
+  ExpectSRegType(0u, expectation);
+  ExpectSRegType(1u, expectation);
+  // Type conflict using move instead of move-object for null, register promotion disabled.
+  EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MoveNull2) {
+  static const FieldDef sfields[] = {
+      { kClassName, "[F", "staticArrayArrayFloatField" },
+      { kClassName, "[I", "staticArrayIntField" },
+      { kClassName, "[[I", "staticArrayArrayIntField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(4u, Instruction::CONST, 0u, 0),
+      DEF_MOVE(4u, Instruction::MOVE_OBJECT, 1u, 0u),
+      DEF_MOVE(4u, Instruction::MOVE_OBJECT, 2u, 1u),
+      DEF_SGET(5u, Instruction::SGET_OBJECT, 3u, 0u),
+      DEF_SGET(5u, Instruction::SGET_OBJECT, 4u, 1u),
+      DEF_SGET(5u, Instruction::SGET_OBJECT, 5u, 2u),
+      DEF_PHI2(6u, 6u, 0u, 3u),
+      DEF_PHI2(6u, 7u, 1u, 4u),
+      DEF_PHI2(6u, 8u, 2u, 5u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 9u, 6u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 10u, 7u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 11u, 8u),
+      { 6u, Instruction::RETURN_OBJECT, 0, 0u, 1u, { 8u }, 0u, { } },
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()[[I", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+          kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+          kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+          kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 2u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 2u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  // Type conflict in array type not propagated to actual register.
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ReuseNull1) {
+  static const FieldDef sfields[] = {
+      { kClassName, "[I", "staticArrayLongField" },
+      { kClassName, "[[F", "staticArrayArrayFloatField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 0u),
+      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 1u),
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectation = {
+      1u,
+      kExpectRef | kExpectNarrow | kExpectNull |
+      kExpectArrayCore | kExpectArrayRef | kExpectArrayFp | kExpectArrayNarrow
+  };
+  ExpectSRegType(0u, expectation);
+  // Type conflict in array type not propagated to actual register.
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
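ReuseNull1's shape in Java (illustrative; dx typically reuses a single const/4
register for both stores, which is what lets the two field types meet in one
SSA reg):

    class ReuseNullSketch {
      static int[] staticArrayIntField;
      static float[][] staticArrayArrayFloatField;

      static void foo() {
        staticArrayIntField = null;         // sput-object, core element type
        staticArrayArrayFloatField = null;  // sput-object, reference elements
      }
    }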
+TEST_F(TypeInferenceTest, ReuseNull2) {
+  static const FieldDef sfields[] = {
+      { kClassName, "[J", "staticArrayLongField" },
+      { kClassName, "[[F", "staticArrayArrayFloatField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 0u),
+      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 1u),
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectation = {
+      1u,
+      kExpectRef | kExpectNarrow | kExpectNull |
+      kExpectArrayCore | kExpectArrayRef | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+  };
+  ExpectSRegType(0u, expectation);
+  // Type conflict in array type not propagated to actual register.
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArgIsNonNull) {
+  constexpr uint32_t thiz = kLocalVRs;
+  static const MIRDef mirs[] = {
+      DEF_MOVE(3u, Instruction::MOVE_OBJECT, 0u, thiz),
+  };
+
+  BuildDexFile("(Ljava/lang/Object;)V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectation = {
+      0u,
+      kExpectRef | kExpectNarrow
+  };
+  ExpectSRegType(0u, expectation);
+  // No type conflict here; register promotion remains enabled.
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, IfCc) {
+  static const FieldDef sfields[] = {
+      { kClassName, "I", "intField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_SGET(3u, Instruction::SGET, 0u, 0u),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0u),
+      { 3u, Instruction::IF_EQ, 0, 0u, 2, { 0u, 1u }, 0, { } },
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", false);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+}  // namespace art
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index a4df00e..c1d5cb7 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -46,7 +46,7 @@
 }
 
 bool VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) {
-  DCHECK(method_verifier != NULL);
+  DCHECK(method_verifier != nullptr);
   MethodReference ref = method_verifier->GetMethodReference();
   bool compile = IsCandidateForCompilation(ref, method_verifier->GetAccessFlags());
   const VerifiedMethod* verified_method = VerifiedMethod::Create(method_verifier, compile);
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 977757f..7eba515 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -166,7 +166,7 @@
         }
       }
     } else {
-      DCHECK(i >= 65536 || reg_bitmap == NULL);
+      DCHECK(i >= 65536 || reg_bitmap == nullptr);
     }
   }
 }
@@ -283,7 +283,7 @@
     }
     mirror::ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod(
         is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
-    if (abstract_method == NULL) {
+    if (abstract_method == nullptr) {
       // If the method is not found in the cache this means that it was never found
       // by ResolveMethodAndCheckAccess() called when verifying invoke_*.
       continue;
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 437ae52..ad07639 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -59,7 +59,7 @@
     return safe_cast_set_;
   }
 
-  // Returns the devirtualization target method, or nullptr if none.
+  // Returns the devirtualization target method, or null if none.
   const MethodReference* GetDevirtTarget(uint32_t dex_pc) const;
 
   // Returns the dequicken field / method for a quick invoke / field get. Returns null if there is
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 2b78e38..e681bcf 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -23,400 +23,6 @@
 
 namespace art {
 
-bool MIRGraph::SetFp(int index, bool is_fp) {
-  bool change = false;
-  if (is_fp && !reg_location_[index].fp) {
-    reg_location_[index].fp = true;
-    reg_location_[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetFp(int index) {
-  bool change = false;
-  if (!reg_location_[index].fp) {
-    reg_location_[index].fp = true;
-    reg_location_[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetCore(int index, bool is_core) {
-  bool change = false;
-  if (is_core && !reg_location_[index].defined) {
-    reg_location_[index].core = true;
-    reg_location_[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetCore(int index) {
-  bool change = false;
-  if (!reg_location_[index].defined) {
-    reg_location_[index].core = true;
-    reg_location_[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetRef(int index, bool is_ref) {
-  bool change = false;
-  if (is_ref && !reg_location_[index].defined) {
-    reg_location_[index].ref = true;
-    reg_location_[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetRef(int index) {
-  bool change = false;
-  if (!reg_location_[index].defined) {
-    reg_location_[index].ref = true;
-    reg_location_[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetWide(int index, bool is_wide) {
-  bool change = false;
-  if (is_wide && !reg_location_[index].wide) {
-    reg_location_[index].wide = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetWide(int index) {
-  bool change = false;
-  if (!reg_location_[index].wide) {
-    reg_location_[index].wide = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetHigh(int index, bool is_high) {
-  bool change = false;
-  if (is_high && !reg_location_[index].high_word) {
-    reg_location_[index].high_word = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetHigh(int index) {
-  bool change = false;
-  if (!reg_location_[index].high_word) {
-    reg_location_[index].high_word = true;
-    change = true;
-  }
-  return change;
-}
-
-
-/*
- * Infer types and sizes.  We don't need to track change on sizes,
- * as it doesn't propagate.  We're guaranteed at least one pass through
- * the cfg.
- */
-bool MIRGraph::InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed) {
-  SSARepresentation *ssa_rep = mir->ssa_rep;
-
-  /*
-   * The dex bytecode definition does not explicitly outlaw the definition of the same
-   * virtual register to be used in both a 32-bit and 64-bit pair context.  However, dx
-   * does not generate this pattern (at least recently).  Further, in the next revision of
-   * dex, we will forbid this.  To support the few cases in the wild, detect this pattern
-   * and punt to the interpreter.
-   */
-  bool type_mismatch = false;
-
-  if (ssa_rep) {
-    uint64_t attrs = GetDataFlowAttributes(mir);
-    const int* uses = ssa_rep->uses;
-    const int* defs = ssa_rep->defs;
-
-    // Handle defs
-    if (attrs & DF_DA) {
-      if (attrs & DF_CORE_A) {
-        changed |= SetCore(defs[0]);
-      }
-      if (attrs & DF_REF_A) {
-        changed |= SetRef(defs[0]);
-      }
-      if (attrs & DF_A_WIDE) {
-        reg_location_[defs[0]].wide = true;
-        reg_location_[defs[1]].wide = true;
-        reg_location_[defs[1]].high_word = true;
-        DCHECK_EQ(SRegToVReg(defs[0])+1,
-        SRegToVReg(defs[1]));
-      }
-    }
-
-
-    // Handles uses
-    int next = 0;
-    if (attrs & DF_UA) {
-      if (attrs & DF_CORE_A) {
-        changed |= SetCore(uses[next]);
-      }
-      if (attrs & DF_REF_A) {
-        changed |= SetRef(uses[next]);
-      }
-      if (attrs & DF_A_WIDE) {
-        reg_location_[uses[next]].wide = true;
-        reg_location_[uses[next + 1]].wide = true;
-        reg_location_[uses[next + 1]].high_word = true;
-        DCHECK_EQ(SRegToVReg(uses[next])+1,
-        SRegToVReg(uses[next + 1]));
-        next += 2;
-      } else {
-        type_mismatch |= reg_location_[uses[next]].wide;
-        next++;
-      }
-    }
-    if (attrs & DF_UB) {
-      if (attrs & DF_CORE_B) {
-        changed |= SetCore(uses[next]);
-      }
-      if (attrs & DF_REF_B) {
-        changed |= SetRef(uses[next]);
-      }
-      if (attrs & DF_B_WIDE) {
-        reg_location_[uses[next]].wide = true;
-        reg_location_[uses[next + 1]].wide = true;
-        reg_location_[uses[next + 1]].high_word = true;
-        DCHECK_EQ(SRegToVReg(uses[next])+1,
-                             SRegToVReg(uses[next + 1]));
-        next += 2;
-      } else {
-        type_mismatch |= reg_location_[uses[next]].wide;
-        next++;
-      }
-    }
-    if (attrs & DF_UC) {
-      if (attrs & DF_CORE_C) {
-        changed |= SetCore(uses[next]);
-      }
-      if (attrs & DF_REF_C) {
-        changed |= SetRef(uses[next]);
-      }
-      if (attrs & DF_C_WIDE) {
-        reg_location_[uses[next]].wide = true;
-        reg_location_[uses[next + 1]].wide = true;
-        reg_location_[uses[next + 1]].high_word = true;
-        DCHECK_EQ(SRegToVReg(uses[next])+1,
-        SRegToVReg(uses[next + 1]));
-      } else {
-        type_mismatch |= reg_location_[uses[next]].wide;
-      }
-    }
-
-    // Special-case return handling
-    if ((mir->dalvikInsn.opcode == Instruction::RETURN) ||
-        (mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) ||
-        (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
-      switch (cu_->shorty[0]) {
-          case 'I':
-            type_mismatch |= reg_location_[uses[0]].wide;
-            changed |= SetCore(uses[0]);
-            break;
-          case 'J':
-            changed |= SetCore(uses[0]);
-            changed |= SetCore(uses[1]);
-            reg_location_[uses[0]].wide = true;
-            reg_location_[uses[1]].wide = true;
-            reg_location_[uses[1]].high_word = true;
-            break;
-          case 'F':
-            type_mismatch |= reg_location_[uses[0]].wide;
-            changed |= SetFp(uses[0]);
-            break;
-          case 'D':
-            changed |= SetFp(uses[0]);
-            changed |= SetFp(uses[1]);
-            reg_location_[uses[0]].wide = true;
-            reg_location_[uses[1]].wide = true;
-            reg_location_[uses[1]].high_word = true;
-            break;
-          case 'L':
-            type_mismatch |= reg_location_[uses[0]].wide;
-            changed |= SetRef(uses[0]);
-            break;
-          default: break;
-      }
-    }
-
-    // Special-case handling for format 35c/3rc invokes
-    Instruction::Code opcode = mir->dalvikInsn.opcode;
-    int flags = MIR::DecodedInstruction::IsPseudoMirOp(opcode) ?
-                  0 : mir->dalvikInsn.FlagsOf();
-    if ((flags & Instruction::kInvoke) &&
-        (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
-      DCHECK_EQ(next, 0);
-      const auto& lowering_info = GetMethodLoweringInfo(mir);
-      const char* shorty = GetShortyFromMethodReference(lowering_info.GetTargetMethod());
-      // Handle result type if floating point
-      if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
-        MIR* move_result_mir = FindMoveResult(bb, mir);
-        // Result might not be used at all, so no move-result
-        if (move_result_mir && (move_result_mir->dalvikInsn.opcode !=
-            Instruction::MOVE_RESULT_OBJECT)) {
-          SSARepresentation* tgt_rep = move_result_mir->ssa_rep;
-          DCHECK(tgt_rep != NULL);
-          tgt_rep->fp_def[0] = true;
-          changed |= SetFp(tgt_rep->defs[0]);
-          if (shorty[0] == 'D') {
-            tgt_rep->fp_def[1] = true;
-            changed |= SetFp(tgt_rep->defs[1]);
-          }
-        }
-      }
-      int num_uses = mir->dalvikInsn.vA;
-      // If this is a non-static invoke, mark implicit "this"
-      if (!IsInstructionInvokeStatic(mir->dalvikInsn.opcode)) {
-        reg_location_[uses[next]].defined = true;
-        reg_location_[uses[next]].ref = true;
-        type_mismatch |= reg_location_[uses[next]].wide;
-        next++;
-      }
-      uint32_t cpos = 1;
-      if (strlen(shorty) > 1) {
-        for (int i = next; i < num_uses;) {
-          DCHECK_LT(cpos, strlen(shorty));
-          switch (shorty[cpos++]) {
-            case 'D':
-              ssa_rep->fp_use[i] = true;
-              ssa_rep->fp_use[i+1] = true;
-              reg_location_[uses[i]].wide = true;
-              reg_location_[uses[i+1]].wide = true;
-              reg_location_[uses[i+1]].high_word = true;
-              DCHECK_EQ(SRegToVReg(uses[i])+1, SRegToVReg(uses[i+1]));
-              i++;
-              break;
-            case 'J':
-              reg_location_[uses[i]].wide = true;
-              reg_location_[uses[i+1]].wide = true;
-              reg_location_[uses[i+1]].high_word = true;
-              DCHECK_EQ(SRegToVReg(uses[i])+1, SRegToVReg(uses[i+1]));
-              changed |= SetCore(uses[i]);
-              i++;
-              break;
-            case 'F':
-              type_mismatch |= reg_location_[uses[i]].wide;
-              ssa_rep->fp_use[i] = true;
-              break;
-            case 'L':
-              type_mismatch |= reg_location_[uses[i]].wide;
-              changed |= SetRef(uses[i]);
-              break;
-            default:
-              type_mismatch |= reg_location_[uses[i]].wide;
-              changed |= SetCore(uses[i]);
-              break;
-          }
-          i++;
-        }
-      }
-    }
-
-    for (int i = 0; ssa_rep->fp_use && i< ssa_rep->num_uses; i++) {
-      if (ssa_rep->fp_use[i]) {
-        changed |= SetFp(uses[i]);
-      }
-    }
-    for (int i = 0; ssa_rep->fp_def && i< ssa_rep->num_defs; i++) {
-      if (ssa_rep->fp_def[i]) {
-        changed |= SetFp(defs[i]);
-      }
-    }
-    // Special-case handling for moves & Phi
-    if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
-      /*
-       * If any of our inputs or outputs is defined, set all.
-       * Some ugliness related to Phi nodes and wide values.
-       * The Phi set will include all low words or all high
-       * words, so we have to treat them specially.
-       */
-      bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi);
-      RegLocation rl_temp = reg_location_[defs[0]];
-      bool defined_fp = rl_temp.defined && rl_temp.fp;
-      bool defined_core = rl_temp.defined && rl_temp.core;
-      bool defined_ref = rl_temp.defined && rl_temp.ref;
-      bool is_wide = rl_temp.wide || ((attrs & DF_A_WIDE) != 0);
-      bool is_high = is_phi && rl_temp.wide && rl_temp.high_word;
-      for (int i = 0; i < ssa_rep->num_uses; i++) {
-        rl_temp = reg_location_[uses[i]];
-        defined_fp |= rl_temp.defined && rl_temp.fp;
-        defined_core |= rl_temp.defined && rl_temp.core;
-        defined_ref |= rl_temp.defined && rl_temp.ref;
-        is_wide |= rl_temp.wide;
-        is_high |= is_phi && rl_temp.wide && rl_temp.high_word;
-      }
-      /*
-       * We don't normally expect to see a Dalvik register definition used both as a
-       * floating point and core value, though technically it could happen with constants.
-       * Until we have proper typing, detect this situation and disable register promotion
-       * (which relies on the distinction between core a fp usages).
-       */
-      if ((defined_fp && (defined_core | defined_ref)) &&
-          ((cu_->disable_opt & (1 << kPromoteRegs)) == 0)) {
-        LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
-                     << " op at block " << bb->id
-                     << " has both fp and core/ref uses for same def.";
-        cu_->disable_opt |= (1 << kPromoteRegs);
-      }
-      changed |= SetFp(defs[0], defined_fp);
-      changed |= SetCore(defs[0], defined_core);
-      changed |= SetRef(defs[0], defined_ref);
-      changed |= SetWide(defs[0], is_wide);
-      changed |= SetHigh(defs[0], is_high);
-      if (attrs & DF_A_WIDE) {
-        changed |= SetWide(defs[1]);
-        changed |= SetHigh(defs[1]);
-      }
-
-      bool has_ins = (GetNumOfInVRs() > 0);
-
-      for (int i = 0; i < ssa_rep->num_uses; i++) {
-        if (has_ins && IsInVReg(uses[i])) {
-          // NB: The SSA name for the first def of an in-reg will be the same as
-          // the reg's actual name.
-          if (!reg_location_[uses[i]].fp && defined_fp) {
-            // If we were about to infer that this first def of an in-reg is a float
-            // when it wasn't previously (because float/int is set during SSA initialization),
-            // do not allow this to happen.
-            continue;
-          }
-        }
-        changed |= SetFp(uses[i], defined_fp);
-        changed |= SetCore(uses[i], defined_core);
-        changed |= SetRef(uses[i], defined_ref);
-        changed |= SetWide(uses[i], is_wide);
-        changed |= SetHigh(uses[i], is_high);
-      }
-      if (attrs & DF_A_WIDE) {
-        DCHECK_EQ(ssa_rep->num_uses, 2);
-        changed |= SetWide(uses[1]);
-        changed |= SetHigh(uses[1]);
-      }
-    }
-  }
-  if (type_mismatch) {
-    LOG(WARNING) << "Deprecated dex type mismatch, interpreting "
-                 << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-    LOG(INFO) << "@ 0x" << std::hex << mir->offset;
-    SetPuntToInterpreter(true);
-  }
-  return changed;
-}
-
 static const char* storage_name[] = {" Frame ", "PhysReg", " CompilerTemp "};
 
 void MIRGraph::DumpRegLocTable(RegLocation* table, int count) {
@@ -456,56 +62,6 @@
   loc[method_sreg].defined = true;
 
   reg_location_ = loc;
-
-  int num_regs = GetNumOfCodeVRs();
-
-  /* Add types of incoming arguments based on signature */
-  int num_ins = GetNumOfInVRs();
-  if (num_ins > 0) {
-    int s_reg = num_regs - num_ins;
-    if ((cu_->access_flags & kAccStatic) == 0) {
-      // For non-static, skip past "this"
-      reg_location_[s_reg].defined = true;
-      reg_location_[s_reg].ref = true;
-      s_reg++;
-    }
-    const char* shorty = cu_->shorty;
-    int shorty_len = strlen(shorty);
-    for (int i = 1; i < shorty_len; i++) {
-      switch (shorty[i]) {
-        case 'D':
-          reg_location_[s_reg].wide = true;
-          reg_location_[s_reg+1].high_word = true;
-          reg_location_[s_reg+1].fp = true;
-          DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
-          reg_location_[s_reg].fp = true;
-          reg_location_[s_reg].defined = true;
-          s_reg++;
-          break;
-        case 'J':
-          reg_location_[s_reg].wide = true;
-          reg_location_[s_reg+1].high_word = true;
-          DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
-          reg_location_[s_reg].core = true;
-          reg_location_[s_reg].defined = true;
-          s_reg++;
-          break;
-        case 'F':
-          reg_location_[s_reg].fp = true;
-          reg_location_[s_reg].defined = true;
-          break;
-        case 'L':
-          reg_location_[s_reg].ref = true;
-          reg_location_[s_reg].defined = true;
-          break;
-        default:
-          reg_location_[s_reg].core = true;
-          reg_location_[s_reg].defined = true;
-          break;
-        }
-        s_reg++;
-      }
-  }
 }
 
 /*
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index b4d4695..bad8335 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -79,7 +79,7 @@
   }
   if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
     // ClassLinker can return a field of the wrong kind directly from the DexCache.
-    // Silently return nullptr on such incompatible class change.
+    // Silently return null on such incompatible class change.
     return nullptr;
   }
   return resolved_field;
@@ -206,7 +206,7 @@
   }
   if (check_incompatible_class_change &&
       UNLIKELY(resolved_method->CheckIncompatibleClassChange(invoke_type))) {
-    // Silently return nullptr on incompatible class change.
+    // Silently return null on incompatible class change.
     return nullptr;
   }
   return resolved_method;
@@ -302,7 +302,7 @@
                                                   target_dex_cache, class_loader,
                                                   NullHandle<mirror::ArtMethod>(), kVirtual);
     }
-    CHECK(called_method != NULL);
+    CHECK(called_method != nullptr);
     CHECK(!called_method->IsAbstract());
     int stats_flags = kFlagMethodResolved;
     GetCodeAndMethodForDirectCall(/*out*/invoke_type,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index e665e1d..c858326 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -495,7 +495,8 @@
                                 const std::vector<const DexFile*>& dex_files,
                                 TimingLogger* timings) {
   DCHECK(!Runtime::Current()->IsStarted());
-  std::unique_ptr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
+  std::unique_ptr<ThreadPool> thread_pool(
+      new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
   VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false);
   PreCompile(class_loader, dex_files, thread_pool.get(), timings);
   Compile(class_loader, dex_files, thread_pool.get(), timings);
@@ -2101,7 +2102,8 @@
   VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
 }
 
-void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, size_t class_def_index) {
+void CompilerDriver::CompileClass(const ParallelCompilationManager* manager,
+                                  size_t class_def_index) {
   ATRACE_CALL();
   const DexFile& dex_file = *manager->GetDexFile();
   const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
@@ -2251,7 +2253,7 @@
                   // Is eligible for compilation by methods-to-compile filter.
                    IsMethodToCompile(method_ref);
     if (compile) {
-      // NOTE: if compiler declines to compile this method, it will return nullptr.
+      // NOTE: if compiler declines to compile this method, it will return null.
       compiled_method = compiler_->Compile(code_item, access_flags, invoke_type, class_def_idx,
                                            method_idx, class_loader, dex_file);
     }
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 50e1fb1..03c5c5c 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -94,7 +94,7 @@
   // Create a compiler targeting the requested "instruction_set".
   // "image" should be true if image specific optimizations should be
   // enabled.  "image_classes" lets the compiler know what classes it
-  // can assume will be in the image, with nullptr implying all available
+  // can assume will be in the image, with null implying all available
   // classes.
   explicit CompilerDriver(const CompilerOptions* compiler_options,
                           VerificationResults* verification_results,
@@ -228,7 +228,7 @@
   mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Resolve compiling method's class. Returns nullptr on failure.
+  // Resolve compiling method's class. Returns null on failure.
   mirror::Class* ResolveCompilingMethodsClass(
       const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
@@ -240,7 +240,7 @@
       const DexCompilationUnit* mUnit)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Resolve a field. Returns nullptr on failure, including incompatible class change.
+  // Resolve a field. Returns null on failure, including incompatible class change.
   // NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
   ArtField* ResolveField(
       const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
@@ -290,7 +290,7 @@
                                       ArtField* resolved_field)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Resolve a method. Returns nullptr on failure, including incompatible class change.
+  // Resolve a method. Returns null on failure, including incompatible class change.
   mirror::ArtMethod* ResolveMethod(
       ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
@@ -592,16 +592,16 @@
   const bool image_;
 
   // If image_ is true, specifies the classes that will be included in
-  // the image. Note if image_classes_ is nullptr, all classes are
+  // the image. Note if image_classes_ is null, all classes are
   // included in the image.
   std::unique_ptr<std::unordered_set<std::string>> image_classes_;
 
-  // Specifies the classes that will be compiled. Note that if classes_to_compile_ is nullptr,
+  // Specifies the classes that will be compiled. Note that if classes_to_compile_ is null,
   // all classes are eligible for compilation (duplication filters etc. will still apply).
   // This option may be restricted to the boot image, depending on a flag in the implementation.
   std::unique_ptr<std::unordered_set<std::string>> classes_to_compile_;
 
-  // Specifies the methods that will be compiled. Note that if methods_to_compile_ is nullptr,
+  // Specifies the methods that will be compiled. Note that if methods_to_compile_ is null,
   // all methods are eligible for compilation (compilation filters etc. will still apply).
   // This option may be restricted to the boot image, depending on a flag in the implementation.
   std::unique_ptr<std::unordered_set<std::string>> methods_to_compile_;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index ded50ca..5085f32 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -56,20 +56,20 @@
     CHECK(started);
     env_ = Thread::Current()->GetJniEnv();
     class_ = env_->FindClass(class_name);
-    CHECK(class_ != NULL) << "Class not found: " << class_name;
+    CHECK(class_ != nullptr) << "Class not found: " << class_name;
     if (is_virtual) {
       mid_ = env_->GetMethodID(class_, method, signature);
     } else {
       mid_ = env_->GetStaticMethodID(class_, method, signature);
     }
-    CHECK(mid_ != NULL) << "Method not found: " << class_name << "." << method << signature;
+    CHECK(mid_ != nullptr) << "Method not found: " << class_name << "." << method << signature;
   }
 
   void MakeAllExecutable(jobject class_loader) {
     const std::vector<const DexFile*> class_path = GetDexFiles(class_loader);
     for (size_t i = 0; i != class_path.size(); ++i) {
       const DexFile* dex_file = class_path[i];
-      CHECK(dex_file != NULL);
+      CHECK(dex_file != nullptr);
       MakeDexFileExecutable(class_loader, *dex_file);
     }
   }
@@ -84,7 +84,7 @@
       Handle<mirror::ClassLoader> loader(
           hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
       mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader);
-      CHECK(c != NULL);
+      CHECK(c != nullptr);
       for (size_t j = 0; j < c->NumDirectMethods(); j++) {
         MakeExecutable(c->GetDirectMethod(j));
       }
@@ -101,39 +101,38 @@
 
 // Disabled due to 10 second runtime on host
 TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
-  CompileAll(NULL);
+  CompileAll(nullptr);
 
   // All libcore references should resolve
   ScopedObjectAccess soa(Thread::Current());
-  ASSERT_TRUE(java_lang_dex_file_ != NULL);
+  ASSERT_TRUE(java_lang_dex_file_ != nullptr);
   const DexFile& dex = *java_lang_dex_file_;
   mirror::DexCache* dex_cache = class_linker_->FindDexCache(dex);
   EXPECT_EQ(dex.NumStringIds(), dex_cache->NumStrings());
   for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
     const mirror::String* string = dex_cache->GetResolvedString(i);
-    EXPECT_TRUE(string != NULL) << "string_idx=" << i;
+    EXPECT_TRUE(string != nullptr) << "string_idx=" << i;
   }
   EXPECT_EQ(dex.NumTypeIds(), dex_cache->NumResolvedTypes());
   for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
     mirror::Class* type = dex_cache->GetResolvedType(i);
-    EXPECT_TRUE(type != NULL) << "type_idx=" << i
+    EXPECT_TRUE(type != nullptr) << "type_idx=" << i
                               << " " << dex.GetTypeDescriptor(dex.GetTypeId(i));
   }
   EXPECT_EQ(dex.NumMethodIds(), dex_cache->NumResolvedMethods());
   for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
     mirror::ArtMethod* method = dex_cache->GetResolvedMethod(i);
-    EXPECT_TRUE(method != NULL) << "method_idx=" << i
+    EXPECT_TRUE(method != nullptr) << "method_idx=" << i
                                 << " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i))
                                 << " " << dex.GetMethodName(dex.GetMethodId(i));
-    EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != NULL) << "method_idx=" << i
-                                           << " "
-                                           << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i))
-                                           << " " << dex.GetMethodName(dex.GetMethodId(i));
+    EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr) << "method_idx=" << i
+        << " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i)) << " "
+        << dex.GetMethodName(dex.GetMethodId(i));
   }
   EXPECT_EQ(dex.NumFieldIds(), dex_cache->NumResolvedFields());
   for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
     ArtField* field = Runtime::Current()->GetClassLinker()->GetResolvedField(i, dex_cache);
-    EXPECT_TRUE(field != NULL) << "field_idx=" << i
+    EXPECT_TRUE(field != nullptr) << "field_idx=" << i
                                << " " << dex.GetFieldDeclaringClassDescriptor(dex.GetFieldId(i))
                                << " " << dex.GetFieldName(dex.GetFieldId(i));
   }
@@ -153,14 +152,14 @@
     CompileDirectMethod(NullHandle<mirror::ClassLoader>(), "java.lang.Object", "<init>", "()V");
     class_loader = LoadDex("AbstractMethod");
   }
-  ASSERT_TRUE(class_loader != NULL);
+  ASSERT_TRUE(class_loader != nullptr);
   EnsureCompiled(class_loader, "AbstractClass", "foo", "()V", true);
 
   // Create a jobj_ of ConcreteClass, NOT AbstractClass.
   jclass c_class = env_->FindClass("ConcreteClass");
   jmethodID constructor = env_->GetMethodID(c_class, "<init>", "()V");
   jobject jobj_ = env_->NewObject(c_class, constructor);
-  ASSERT_TRUE(jobj_ != NULL);
+  ASSERT_TRUE(jobj_ != nullptr);
 
   // Force non-virtual call to AbstractClass foo, will throw AbstractMethodError exception.
   env_->CallNonvirtualVoidMethod(jobj_, class_, mid_);
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index 03ae489..3983006 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -21,6 +21,7 @@
 
 #include "dex_file.h"
 #include "jni.h"
+#include "base/arena_object.h"
 
 namespace art {
 namespace mirror {
@@ -31,7 +32,7 @@
 struct CompilationUnit;
 class VerifiedMethod;
 
-class DexCompilationUnit {
+class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
  public:
   explicit DexCompilationUnit(CompilationUnit* cu);
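
Deriving DexCompilationUnit from DeletableArenaObject<kArenaAllocMisc> lets instances be allocated from a compiler arena while still being destroyed through `delete`, which runs the destructor but leaves the storage to the arena. A toy sketch of that ownership pattern, using stand-in names rather than the real base/arena_object.h types:

    #include <cstddef>
    #include <memory>
    #include <vector>

    // Toy stand-ins for ArenaAllocator / DeletableArenaObject; only the
    // ownership idea is reproduced here.
    class Arena {
     public:
      void* Alloc(std::size_t size) {
        blocks_.emplace_back(new char[size]);
        return blocks_.back().get();
      }

     private:
      std::vector<std::unique_ptr<char[]>> blocks_;  // Freed with the arena.
    };

    class DeletableArenaObjectSketch {
     public:
      void* operator new(std::size_t size, Arena* arena) {
        return arena->Alloc(size);
      }
      // `delete` runs the destructor but returns no memory; the arena owns it.
      void operator delete(void*) {}
    };

    class Unit : public DeletableArenaObjectSketch {
     public:
      explicit Unit(int id) : id_(id) {}
      int id() const { return id_; }

     private:
      int id_;
    };

    int main() {
      Arena arena;
      Unit* unit = new (&arena) Unit(42);  // Placement form routed to the arena.
      int id = unit->id();
      delete unit;  // Destructor only; storage is reclaimed with `arena`.
      return id == 42 ? 0 : 1;
    }
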
 
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index b67dd26..32c8cce 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -374,7 +374,7 @@
   }
 
   Elf_Word GetSize() const {
-    // 1 is for the implicit NULL symbol.
+    // 1 is for the implicit null symbol.
     return symbols_.size() + 1;
   }
 
@@ -578,7 +578,7 @@
       hash_builder_(".hash", SHT_HASH, SHF_ALLOC, &dynsym_builder_, 0, sizeof(Elf_Word),
                     sizeof(Elf_Word)),
       dynamic_builder_(".dynamic", &dynsym_builder_),
-      shstrtab_builder_(".shstrtab", SHT_STRTAB, 0, NULL, 0, 1, 1) {
+      shstrtab_builder_(".shstrtab", SHT_STRTAB, 0, nullptr, 0, 1, 1) {
     SetupEhdr();
     SetupDynamic();
     SetupRequiredSymbols();
@@ -689,7 +689,7 @@
     // +-------------------------+  (Optional)
     // | .debug_line             |  (Optional)
     // +-------------------------+  (Optional)
-    // | Elf_Shdr NULL           |
+    // | Elf_Shdr null           |
     // | Elf_Shdr .dynsym        |
     // | Elf_Shdr .dynstr        |
     // | Elf_Shdr .hash          |
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 949fcab..3b2ca94 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -148,7 +148,7 @@
   RawSection debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
   RawSection debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
   RawSection debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
-  RawSection oat_patches(".oat_patches", SHT_OAT_PATCH, 0, NULL, 0, 1, 0);
+  RawSection oat_patches(".oat_patches", SHT_OAT_PATCH, 0, nullptr, 0, 1, 0);
 
   // Do not add to .oat_patches since we will make the addresses relative.
   std::vector<uintptr_t> eh_frame_patches;
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 3e5ad7b..08523d8 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -55,12 +55,12 @@
   LOG(INFO) << "elf_filename=" << elf_filename;
 
   UnreserveImageSpace();
-  void* dl_oatdata = NULL;
-  void* dl_oatexec = NULL;
-  void* dl_oatlastword = NULL;
+  void* dl_oatdata = nullptr;
+  void* dl_oatexec = nullptr;
+  void* dl_oatlastword = nullptr;
 
   std::unique_ptr<File> file(OS::OpenFileForReading(elf_filename.c_str()));
-  ASSERT_TRUE(file.get() != NULL);
+  ASSERT_TRUE(file.get() != nullptr);
   {
     std::string error_msg;
     std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(), false, false, &error_msg));
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 8016831..eaf3489 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -68,7 +68,7 @@
   // TODO: compile_pic should be a test argument.
   {
     {
-      jobject class_loader = NULL;
+      jobject class_loader = nullptr;
       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
       TimingLogger timings("ImageTest::WriteRead", false, false);
       TimingLogger::ScopedTiming t("CompileAll", &timings);
@@ -92,7 +92,7 @@
   }
  // Work around a bug where mcld::Linker::emit closes oat_file by reopening it as dup_oat.
   std::unique_ptr<File> dup_oat(OS::OpenFileReadWrite(oat_file.GetFilename().c_str()));
-  ASSERT_TRUE(dup_oat.get() != NULL);
+  ASSERT_TRUE(dup_oat.get() != nullptr);
 
   {
     bool success_image =
@@ -107,7 +107,7 @@
 
   {
     std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
-    ASSERT_TRUE(file.get() != NULL);
+    ASSERT_TRUE(file.get() != nullptr);
     ImageHeader image_header;
     ASSERT_EQ(file->ReadFully(&image_header, sizeof(image_header)), true);
     ASSERT_TRUE(image_header.IsValid());
@@ -118,12 +118,12 @@
     ASSERT_TRUE(!heap->GetContinuousSpaces().empty());
     gc::space::ContinuousSpace* space = heap->GetNonMovingSpace();
     ASSERT_FALSE(space->IsImageSpace());
-    ASSERT_TRUE(space != NULL);
+    ASSERT_TRUE(space != nullptr);
     ASSERT_TRUE(space->IsMallocSpace());
     ASSERT_GE(sizeof(image_header) + space->Size(), static_cast<size_t>(file->GetLength()));
   }
 
-  ASSERT_TRUE(compiler_driver_->GetImageClasses() != NULL);
+  ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr);
   std::unordered_set<std::string> image_classes(*compiler_driver_->GetImageClasses());
 
   // Need to delete the compiler since it has worker threads which are attached to runtime.
@@ -137,7 +137,7 @@
   writer.reset(nullptr);
 
   runtime_.reset();
-  java_lang_dex_file_ = NULL;
+  java_lang_dex_file_ = nullptr;
 
   MemMap::Init();
   std::unique_ptr<const DexFile> dex(LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str()));
@@ -145,7 +145,7 @@
   RuntimeOptions options;
   std::string image("-Ximage:");
   image.append(image_location.GetFilename());
-  options.push_back(std::make_pair(image.c_str(), reinterpret_cast<void*>(NULL)));
+  options.push_back(std::make_pair(image.c_str(), static_cast<void*>(nullptr)));
   // By default the compiler this creates will not include patch information.
   options.push_back(std::make_pair("-Xnorelocate", nullptr));
 
@@ -158,7 +158,7 @@
  // give it away now and then switch to a more manageable ScopedObjectAccess.
   Thread::Current()->TransitionFromRunnableToSuspended(kNative);
   ScopedObjectAccess soa(Thread::Current());
-  ASSERT_TRUE(runtime_.get() != NULL);
+  ASSERT_TRUE(runtime_.get() != nullptr);
   class_linker_ = runtime_->GetClassLinker();
 
   gc::Heap* heap = Runtime::Current()->GetHeap();
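
The RuntimeOptions change above replaces `reinterpret_cast<void*>(NULL)` with `static_cast<void*>(nullptr)`: converting a null pointer constant to `void*` is an ordinary implicit conversion, so no reinterpretation is involved, and the cast still yields a concrete `void*` so that template argument deduction does not land on `std::nullptr_t`. A small sketch under those assumptions (the option string is hypothetical):

    #include <string>
    #include <type_traits>
    #include <utility>

    int main() {
      std::string image = "-Ximage:/illustrative/path/boot.art";
      // A bare nullptr would deduce std::pair<const char*, std::nullptr_t>;
      // the cast pins the second member to a real void*.
      auto option = std::make_pair(image.c_str(), static_cast<void*>(nullptr));
      static_assert(std::is_same<decltype(option.second), void*>::value,
                    "second member deduces as void*");
      return option.second == nullptr ? 0 : 1;
    }
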
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index a99ef34..fc70d8f 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -129,7 +129,7 @@
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
 
   std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
-  if (oat_file.get() == NULL) {
+  if (oat_file.get() == nullptr) {
     PLOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
     return false;
   }
@@ -180,7 +180,7 @@
 
   std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
   ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
-  if (image_file.get() == NULL) {
+  if (image_file.get() == nullptr) {
     LOG(ERROR) << "Failed to open image file " << image_filename;
     return false;
   }
@@ -519,7 +519,7 @@
 
 void ImageWriter::ComputeLazyFieldsForImageClasses() {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, NULL);
+  class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, nullptr);
 }
 
 bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
@@ -675,7 +675,7 @@
     if (string_id != nullptr) {
       // This string occurs in this dex file, assign the dex cache entry.
       uint32_t string_idx = dex_file.GetIndexForStringId(*string_id);
-      if (dex_cache->GetResolvedString(string_idx) == NULL) {
+      if (dex_cache->GetResolvedString(string_idx) == nullptr) {
         dex_cache->SetResolvedString(string_idx, string);
       }
     }
@@ -697,7 +697,7 @@
 };
 
 void ImageWriter::PruneNonImageClasses() {
-  if (compiler_driver_.GetImageClasses() == NULL) {
+  if (compiler_driver_.GetImageClasses() == nullptr) {
     return;
   }
   Runtime* runtime = Runtime::Current();
@@ -712,7 +712,7 @@
 
   // Remove the undesired classes from the class roots.
   for (const std::string& it : non_image_classes) {
-    bool result = class_linker->RemoveClass(it.c_str(), NULL);
+    bool result = class_linker->RemoveClass(it.c_str(), nullptr);
     DCHECK(result);
   }
 
@@ -724,13 +724,13 @@
     DexCache* dex_cache = class_linker->GetDexCache(idx);
     for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
       Class* klass = dex_cache->GetResolvedType(i);
-      if (klass != NULL && !IsImageClass(klass)) {
-        dex_cache->SetResolvedType(i, NULL);
+      if (klass != nullptr && !IsImageClass(klass)) {
+        dex_cache->SetResolvedType(i, nullptr);
       }
     }
     for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
       ArtMethod* method = dex_cache->GetResolvedMethod(i);
-      if (method != NULL && !IsImageClass(method->GetDeclaringClass())) {
+      if (method != nullptr && !IsImageClass(method->GetDeclaringClass())) {
         dex_cache->SetResolvedMethod(i, resolution_method);
       }
     }
@@ -777,14 +777,14 @@
 
 void ImageWriter::DumpImageClasses() {
   auto image_classes = compiler_driver_.GetImageClasses();
-  CHECK(image_classes != NULL);
+  CHECK(image_classes != nullptr);
   for (const std::string& image_class : *image_classes) {
     LOG(INFO) << " " << image_class;
   }
 }
 
 void ImageWriter::CalculateObjectBinSlots(Object* obj) {
-  DCHECK(obj != NULL);
+  DCHECK(obj != nullptr);
   // If it is a string, we want to intern it if it's not interned.
   if (obj->GetClass()->IsStringClass()) {
     // we must be an interned string that was forward referenced and already assigned
@@ -856,7 +856,7 @@
   image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
   image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
   for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
-    CHECK(image_roots->Get(i) != NULL);
+    CHECK(image_roots->Get(i) != nullptr);
   }
   return image_roots.Get();
 }
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index d25acc7..436fc0c 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -47,7 +47,7 @@
       return new x86_64::X86_64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
     default:
       LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
-      return NULL;
+      return nullptr;
   }
 }
 
@@ -122,7 +122,7 @@
       return new x86_64::X86_64JniCallingConvention(is_static, is_synchronized, shorty);
     default:
       LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
-      return NULL;
+      return nullptr;
   }
 }
 
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 2402ea5..6f2cb25 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -152,9 +152,9 @@
     // References need placing in handle scope and the entry value passing
     if (ref_param) {
       // Compute handle scope entry, note null is placed in the handle scope but its boxed value
-      // must be NULL
+      // must be null.
       FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
-      // Check handle scope offset is within frame and doesn't run into the saved segment state
+      // Check handle scope offset is within frame and doesn't run into the saved segment state.
       CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
       CHECK_NE(handle_scope_offset.Uint32Value(),
                main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
@@ -243,9 +243,9 @@
   // 7. Iterate over arguments placing values from managed calling convention in
   //    to the convention required for a native call (shuffling). For references
   //    place an index/pointer to the reference after checking whether it is
-  //    NULL (which must be encoded as NULL).
+  //    null (which must be encoded as null).
   //    Note: we do this prior to materializing the JNIEnv* and static's jclass to
-  //    give as many free registers for the shuffle as possible
+  //    give as many free registers for the shuffle as possible.
   mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
   uint32_t args_count = 0;
   while (mr_conv->HasNext()) {
@@ -451,7 +451,7 @@
                                                  ArrayRef<const LinkerPatch>());
 }
 
-// Copy a single parameter from the managed to the JNI calling convention
+// Copy a single parameter from the managed to the JNI calling convention.
 static void CopyParameter(Assembler* jni_asm,
                           ManagedRuntimeCallingConvention* mr_conv,
                           JniCallingConvention* jni_conv,
@@ -469,7 +469,7 @@
   } else {
     CHECK(jni_conv->IsCurrentParamOnStack());
   }
-  // References need placing in handle scope and the entry address passing
+  // References need placing in handle scope and the entry address passing.
   if (ref_param) {
     null_allowed = mr_conv->IsCurrentArgPossiblyNull();
     // Compute handle scope offset. Note null is placed in the handle scope but the jobject
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 5abd204..d2d38da 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -73,7 +73,7 @@
     image_file_location_oat_begin_(image_file_location_oat_begin),
     image_patch_delta_(image_patch_delta),
     key_value_store_(key_value_store),
-    oat_header_(NULL),
+    oat_header_(nullptr),
     size_dex_file_alignment_(0),
     size_executable_offset_alignment_(0),
     size_oat_header_(0),
@@ -326,7 +326,7 @@
     ClassReference class_ref(dex_file_, class_def_index_);
     CompiledClass* compiled_class = writer_->compiler_driver_->GetCompiledClass(class_ref);
     mirror::Class::Status status;
-    if (compiled_class != NULL) {
+    if (compiled_class != nullptr) {
       status = compiled_class->GetStatus();
     } else if (writer_->compiler_driver_->GetVerificationResults()->IsClassRejected(class_ref)) {
       status = mirror::Class::kStatusError;
@@ -473,7 +473,7 @@
         ClassReference class_ref(dex_file_, class_def_index_);
         CompiledClass* compiled_class = compiler_driver->GetCompiledClass(class_ref);
         mirror::Class::Status status;
-        if (compiled_class != NULL) {
+        if (compiled_class != nullptr) {
           status = compiled_class->GetStatus();
         } else if (compiler_driver->GetVerificationResults()->IsClassRejected(class_ref)) {
           status = mirror::Class::kStatusError;
@@ -690,7 +690,7 @@
     OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
     const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
-    if (compiled_method != NULL) {  // ie. not an abstract method
+    if (compiled_method != nullptr) {  // i.e., not an abstract method
       size_t file_offset = file_offset_;
       OutputStream* out = out_;
 
@@ -893,7 +893,7 @@
     OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
     const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
-    if (compiled_method != NULL) {  // ie. not an abstract method
+    if (compiled_method != nullptr) {  // i.e., not an abstract method
       size_t file_offset = file_offset_;
       OutputStream* out = out_;
 
@@ -940,7 +940,7 @@
       }
       const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
       const uint8_t* class_data = dex_file->GetClassData(class_def);
-      if (class_data != NULL) {  // ie not an empty class, such as a marker interface
+      if (class_data != nullptr) {  // i.e., not an empty class, such as a marker interface
         ClassDataItemIterator it(*dex_file, class_data);
         while (it.HasNextStaticField()) {
           it.Next();
@@ -987,7 +987,7 @@
   // create the OatDexFiles
   for (size_t i = 0; i != dex_files_->size(); ++i) {
     const DexFile* dex_file = (*dex_files_)[i];
-    CHECK(dex_file != NULL);
+    CHECK(dex_file != nullptr);
     OatDexFile* oat_dex_file = new OatDexFile(offset, *dex_file);
     oat_dex_files_.push_back(oat_dex_file);
     offset += oat_dex_file->SizeOf();
@@ -1471,13 +1471,13 @@
     oat_method_offsets_offset_from_oat_class += sizeof(method_bitmap_size_);
     oat_method_offsets_offset_from_oat_class += method_bitmap_size_;
   } else {
-    method_bitmap_ = NULL;
+    method_bitmap_ = nullptr;
     method_bitmap_size_ = 0;
   }
 
   for (size_t i = 0; i < num_methods; i++) {
     CompiledMethod* compiled_method = compiled_methods_[i];
-    if (compiled_method == NULL) {
+    if (compiled_method == nullptr) {
       oat_method_offsets_offsets_from_oat_class_[i] = 0;
     } else {
       oat_method_offsets_offsets_from_oat_class_[i] = oat_method_offsets_offset_from_oat_class;
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index cc2b39a..8c79b44 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -235,13 +235,13 @@
     // used to validate file position when writing.
     size_t offset_;
 
-    // CompiledMethods for each class_def_method_index, or NULL if no method is available.
+    // CompiledMethods for each class_def_method_index, or null if no method is available.
     std::vector<CompiledMethod*> compiled_methods_;
 
     // Offset from OatClass::offset_ to the OatMethodOffsets for the
     // class_def_method_index. If 0, it means the corresponding
     // CompiledMethod entry in OatClass::compiled_methods_ should be
-    // NULL and that the OatClass::type_ should be kOatClassBitmap.
+    // null and that the OatClass::type_ should be kOatClassBitmap.
     std::vector<uint32_t> oat_method_offsets_offsets_from_oat_class_;
 
     // data to write
@@ -258,12 +258,12 @@
     // OatClassType::type_ is kOatClassBitmap, a set bit indicates the
     // method has an OatMethodOffsets in method_offsets_; otherwise
     // the entry was omitted to save space. If OatClassType::type_ is
-    // not is kOatClassBitmap, the bitmap will be NULL.
+    // not kOatClassBitmap, the bitmap will be null.
     BitVector* method_bitmap_;
 
     // OatMethodOffsets and OatMethodHeaders for each CompiledMethod
     // present in the OatClass. Note that some may be missing if
-    // OatClass::compiled_methods_ contains NULL values (and
+    // OatClass::compiled_methods_ contains null values (and
     // oat_method_offsets_offsets_from_oat_class_ should contain 0
     // values in this case).
     std::vector<OatMethodOffsets> method_offsets_;
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 74848d5..708733e 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -55,7 +55,7 @@
         buckets_owned_(allocator, num_buckets_, false),
         num_entries_(to_copy.num_entries_) {
     // ArenaAllocator returns zeroed memory, so entries of buckets_ and
-    // buckets_owned_ are initialized to nullptr and false, respectively.
+    // buckets_owned_ are initialized to null and false, respectively.
     DCHECK(IsPowerOfTwo(num_buckets_));
     if (num_buckets_ == to_copy.num_buckets_) {
       // Hash table remains the same size. We copy the bucket pointers and leave
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index bef5896..6ab57b8 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -714,7 +714,7 @@
     // TODO: Implement static evaluation of long unary operations.
     //
     // Do not exit with a fatal condition here.  Instead, simply
-    // return `nullptr' to notify the caller that this instruction
+    // return `null' to notify the caller that this instruction
     // cannot (yet) be statically evaluated.
     return nullptr;
   }
@@ -750,7 +750,7 @@
 }
 
 // If `GetConstantRight()` returns one of the inputs, this returns the other
-// one. Otherwise it returns nullptr.
+// one. Otherwise it returns null.
 HInstruction* HBinaryOperation::GetLeastConstantLeft() const {
   HInstruction* most_constant_right = GetConstantRight();
   if (most_constant_right == nullptr) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 1a24cb5..0993a18 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1634,7 +1634,7 @@
 
   // Try to statically evaluate `operation` and return a HConstant
   // containing the result of this evaluation.  If `operation` cannot
-  // be evaluated as a constant, return nullptr.
+  // be evaluated as a constant, return null.
   HConstant* TryStaticEvaluation() const;
 
   // Apply this operation to `x`.
@@ -1702,7 +1702,7 @@
 
   // Try to statically evaluate `operation` and return a HConstant
   // containing the result of this evaluation.  If `operation` cannot
-  // be evaluated as a constant, return nullptr.
+  // be evaluated as a constant, return null.
   HConstant* TryStaticEvaluation() const;
 
   // Apply this operation to `x` and `y`.
@@ -1710,11 +1710,11 @@
   virtual int64_t Evaluate(int64_t x, int64_t y) const = 0;
 
   // Returns an input that can legally be used as the right input and is
-  // constant, or nullptr.
+  // constant, or null.
   HConstant* GetConstantRight() const;
 
   // If `GetConstantRight()` returns one of the inputs, this returns the other
-  // one. Otherwise it returns nullptr.
+  // one. Otherwise it returns null.
   HInstruction* GetLeastConstantLeft() const;
 
   DECLARE_INSTRUCTION(BinaryOperation);
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 03f5545..fe70d3a 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -333,7 +333,8 @@
     }
     if (after_loop == nullptr) {
       // Uses are only in the loop.
-      first_range_ = last_range_ = range_search_start_ = new (allocator_) LiveRange(start, end, nullptr);
+      first_range_ = last_range_ = range_search_start_ =
+          new (allocator_) LiveRange(start, end, nullptr);
     } else if (after_loop->GetStart() <= end) {
       first_range_ = range_search_start_ = after_loop;
       // There are uses after the loop.
@@ -596,7 +597,7 @@
         previous->next_ = nullptr;
         new_interval->first_range_ = current;
         if (range_search_start_ != nullptr && range_search_start_->GetEnd() >= current->GetEnd()) {
-          // Search start point is inside `new_interval`. Change it to nullptr
+          // Search start point is inside `new_interval`. Change it to null
           // (i.e. the end of the interval) in the original interval.
           range_search_start_ = nullptr;
         }
@@ -863,7 +864,7 @@
         defined_by_(defined_by) {}
 
   // Searches for a LiveRange that either covers the given position or is the
-  // first next LiveRange. Returns nullptr if no such LiveRange exists. Ranges
+  // first next LiveRange. Returns null if no such LiveRange exists. Ranges
   // known to end before `position` can be skipped with `search_start`.
   LiveRange* FindRangeAtOrAfter(size_t position, LiveRange* search_start) const {
     if (kIsDebugBuild) {
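
For context on the comment above: live ranges form a sorted, singly linked list, and `range_search_start_` is a hint that lets successive queries skip ranges already known to end too early. An illustrative stand-in, not the real class:

    #include <cstddef>

    // Ranges are half-open [start, end), sorted, and singly linked.
    struct LiveRange {
      std::size_t start;
      std::size_t end;
      LiveRange* next;
    };

    // Returns the first range that covers `position` or begins after it, or
    // nullptr if all ranges end before `position`. `search_start` may be any
    // node known not to end before previously queried positions.
    LiveRange* FindRangeAtOrAfter(std::size_t position, LiveRange* search_start) {
      LiveRange* range = search_start;
      while (range != nullptr && range->end <= position) {
        range = range->next;
      }
      return range;
    }

    int main() {
      LiveRange loop_range{10, 20, nullptr};
      LiveRange head{0, 4, &loop_range};
      // Position 6 is covered by nothing; the first range at-or-after it is
      // [10, 20). A later query can pass `&loop_range` as the start hint.
      return FindRangeAtOrAfter(6, &head) == &loop_range ? 0 : 1;
    }
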
diff --git a/compiler/output_stream_test.cc b/compiler/output_stream_test.cc
index bba9892..fbc9d0d 100644
--- a/compiler/output_stream_test.cc
+++ b/compiler/output_stream_test.cc
@@ -66,7 +66,7 @@
   SetOutputStream(output_stream);
   GenerateTestOutput();
   std::unique_ptr<File> in(OS::OpenFileForReading(tmp.GetFilename().c_str()));
-  EXPECT_TRUE(in.get() != NULL);
+  EXPECT_TRUE(in.get() != nullptr);
   std::vector<uint8_t> actual(in->GetLength());
   bool readSuccess = in->ReadFully(&actual[0], actual.size());
   EXPECT_TRUE(readSuccess);
@@ -76,12 +76,12 @@
 TEST_F(OutputStreamTest, Buffered) {
   ScratchFile tmp;
   std::unique_ptr<FileOutputStream> file_output_stream(new FileOutputStream(tmp.GetFile()));
-  CHECK(file_output_stream.get() != NULL);
+  CHECK(file_output_stream.get() != nullptr);
   BufferedOutputStream buffered_output_stream(file_output_stream.release());
   SetOutputStream(buffered_output_stream);
   GenerateTestOutput();
   std::unique_ptr<File> in(OS::OpenFileForReading(tmp.GetFilename().c_str()));
-  EXPECT_TRUE(in.get() != NULL);
+  EXPECT_TRUE(in.get() != nullptr);
   std::vector<uint8_t> actual(in->GetLength());
   bool readSuccess = in->ReadFully(&actual[0], actual.size());
   EXPECT_TRUE(readSuccess);
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index dd0dba2..313f365 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -739,17 +739,17 @@
   void GetCurrentThread(ManagedRegister tr) OVERRIDE;
   void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
-  // NULL.
-  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
-                       bool null_allowed) OVERRIDE;
+  // null.
+  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
-                       bool null_allowed) OVERRIDE;
+  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+                              ManagedRegister scratch, bool null_allowed) OVERRIDE;
 
   // src holds a handle scope entry (Object**); load this into dst
   void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index b7715af..e47b531 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -149,14 +149,14 @@
   void GetCurrentThread(ManagedRegister tr) OVERRIDE;
   void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
-  // NULL.
+  // null.
   void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
                        ManagedRegister in_reg, bool null_allowed) OVERRIDE;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
                        ManagedRegister scratch, bool null_allowed) OVERRIDE;
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 36342c6..b016e74 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -41,8 +41,8 @@
   contents_ = NewContents(kInitialBufferCapacity);
   cursor_ = contents_;
   limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
-  fixup_ = NULL;
-  slow_path_ = NULL;
+  fixup_ = nullptr;
+  slow_path_ = nullptr;
 #ifndef NDEBUG
   has_ensured_capacity_ = false;
   fixups_processed_ = false;
@@ -61,7 +61,7 @@
 
 void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) {
   AssemblerFixup* fixup = fixup_;
-  while (fixup != NULL) {
+  while (fixup != nullptr) {
     fixup->Process(region, fixup->position());
     fixup = fixup->previous();
   }
@@ -127,7 +127,7 @@
       return new x86_64::X86_64Assembler();
     default:
       LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
-      return NULL;
+      return nullptr;
   }
 }
 
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index ebafd3d..2e3a47b 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -156,7 +156,7 @@
 // Parent of all queued slow paths, emitted during finalization
 class SlowPath {
  public:
-  SlowPath() : next_(NULL) {}
+  SlowPath() : next_(nullptr) {}
   virtual ~SlowPath() {}
 
   Label* Continuation() { return &continuation_; }
@@ -216,20 +216,20 @@
   }
 
   void EnqueueSlowPath(SlowPath* slowpath) {
-    if (slow_path_ == NULL) {
+    if (slow_path_ == nullptr) {
       slow_path_ = slowpath;
     } else {
       SlowPath* cur = slow_path_;
-      for ( ; cur->next_ != NULL ; cur = cur->next_) {}
+      for ( ; cur->next_ != nullptr ; cur = cur->next_) {}
       cur->next_ = slowpath;
     }
   }
 
   void EmitSlowPaths(Assembler* sp_asm) {
     SlowPath* cur = slow_path_;
-    SlowPath* next = NULL;
-    slow_path_ = NULL;
-    for ( ; cur != NULL ; cur = next) {
+    SlowPath* next = nullptr;
+    slow_path_ = nullptr;
+    for ( ; cur != nullptr ; cur = next) {
       cur->Emit(sp_asm);
       next = cur->next_;
       delete cur;
@@ -489,14 +489,14 @@
   virtual void GetCurrentThread(FrameOffset dest_offset,
                                 ManagedRegister scratch) = 0;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
-  // NULL.
+  // null.
   virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
                                ManagedRegister in_reg, bool null_allowed) = 0;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
                                ManagedRegister scratch, bool null_allowed) = 0;
diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h
index b062a2a..a9a5781 100644
--- a/compiler/utils/dedupe_set.h
+++ b/compiler/utils/dedupe_set.h
@@ -40,8 +40,8 @@
   struct HashedKey {
     StoreKey* store_ptr;
     union {
-      HashType store_hash;        // Valid if store_ptr != nullptr.
-      const HashedInKey* in_key;  // Valid if store_ptr == nullptr.
+      HashType store_hash;        // Valid if store_ptr != null.
+      const HashedInKey* in_key;  // Valid if store_ptr == null.
     };
   };
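
The "valid if" comments describe a tagged-union discipline: `store_ptr` doubles as the discriminant that says which union member may be read. A sketch of that pattern with illustrative stand-in names:

    #include <cassert>
    #include <cstdint>
    #include <string>

    struct HashedKeySketch {
      std::string* store_ptr;
      union {
        uint32_t store_hash;            // Valid if store_ptr != nullptr.
        const std::string* lookup_key;  // Valid if store_ptr == nullptr.
      };
    };

    int main() {
      std::string stored = "dedupe me";
      HashedKeySketch owned;
      owned.store_ptr = &stored;
      owned.store_hash = 0x1234u;       // Live member: store_hash.

      std::string probe = "dedupe me";
      HashedKeySketch lookup;
      lookup.store_ptr = nullptr;
      lookup.lookup_key = &probe;       // Live member: lookup_key.

      // Readers branch on store_ptr before touching the union.
      assert(owned.store_ptr == nullptr || owned.store_hash == 0x1234u);
      assert(lookup.store_ptr != nullptr || lookup.lookup_key == &probe);
      return 0;
    }
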
 
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 216cb41..d4acf03 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -238,17 +238,17 @@
   void GetCurrentThread(ManagedRegister tr) OVERRIDE;
   void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
-  // NULL.
-  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
-                       bool null_allowed) OVERRIDE;
+  // null.
+  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister mscratch,
-                       bool null_allowed) OVERRIDE;
+  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+                              ManagedRegister mscratch, bool null_allowed) OVERRIDE;
 
   // src holds a handle scope entry (Object**); load this into dst
   void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 36e74d7..b7f6a9e 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -235,14 +235,14 @@
   void GetCurrentThread(ManagedRegister tr) OVERRIDE;
   void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
-  // NULL.
+  // null.
   void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
                               ManagedRegister in_reg, bool null_allowed) OVERRIDE;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister
                               mscratch, bool null_allowed) OVERRIDE;
diff --git a/compiler/utils/test_dex_file_builder.h b/compiler/utils/test_dex_file_builder.h
new file mode 100644
index 0000000..ab039aa
--- /dev/null
+++ b/compiler/utils/test_dex_file_builder.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
+#define ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
+
+#include <cstring>
+#include <map>
+#include <set>
+#include <vector>
+
+#include "dex_file.h"
+#include "utils.h"
+
+namespace art {
+
+class TestDexFileBuilder {
+ public:
+  TestDexFileBuilder()
+      : strings_(), types_(), fields_(), protos_(), dex_file_data_() {
+  }
+
+  void AddString(const std::string& str) {
+    CHECK(dex_file_data_.empty());
+    auto it = strings_.emplace(str, IdxAndDataOffset()).first;
+    CHECK_LT(it->first.length(), 128u);  // Don't allow multi-byte length in uleb128.
+  }
+
+  void AddType(const std::string& descriptor) {
+    CHECK(dex_file_data_.empty());
+    AddString(descriptor);
+    types_.emplace(descriptor, 0u);
+  }
+
+  void AddField(const std::string& class_descriptor, const std::string& type,
+                const std::string& name) {
+    CHECK(dex_file_data_.empty());
+    AddType(class_descriptor);
+    AddType(type);
+    AddString(name);
+    FieldKey key = { class_descriptor, type, name };
+    fields_.emplace(key, 0u);
+  }
+
+  void AddMethod(const std::string& class_descriptor, const std::string& signature,
+                 const std::string& name) {
+    CHECK(dex_file_data_.empty());
+    AddType(class_descriptor);
+    AddString(name);
+
+    ProtoKey proto_key = CreateProtoKey(signature);
+    AddString(proto_key.shorty);
+    AddType(proto_key.return_type);
+    for (const auto& arg_type : proto_key.args) {
+      AddType(arg_type);
+    }
+    auto it = protos_.emplace(proto_key, IdxAndDataOffset()).first;
+    const ProtoKey* proto = &it->first;  // Valid as long as the element remains in protos_.
+
+    MethodKey method_key = {
+        class_descriptor, name, proto
+    };
+    methods_.emplace(method_key, 0u);
+  }
+
+  // NOTE: The builder holds the actual data, so it must live as long as the dex file.
+  std::unique_ptr<const DexFile> Build(const std::string& dex_location) {
+    CHECK(dex_file_data_.empty());
+    union {
+      uint8_t data[sizeof(DexFile::Header)];
+      uint64_t force_alignment;
+    } header_data;
+    std::memset(header_data.data, 0, sizeof(header_data.data));
+    DexFile::Header* header = reinterpret_cast<DexFile::Header*>(&header_data.data);
+    std::copy_n(DexFile::kDexMagic, 4u, header->magic_);
+    std::copy_n(DexFile::kDexMagicVersion, 4u, header->magic_ + 4u);
+    header->header_size_ = sizeof(DexFile::Header);  // Not sizeof(header), which is a pointer.
+    header->endian_tag_ = DexFile::kDexEndianConstant;
+    header->link_size_ = 0u;  // Unused.
+    header->link_off_ = 0u;  // Unused.
+    header->map_off_ = 0u;  // Unused.
+
+    uint32_t data_section_size = 0u;
+
+    uint32_t string_ids_offset = sizeof(DexFile::Header);
+    uint32_t string_idx = 0u;
+    for (auto& entry : strings_) {
+      entry.second.idx = string_idx;
+      string_idx += 1u;
+      entry.second.data_offset = data_section_size;
+      data_section_size += entry.first.length() + 1u /* length */ + 1u /* null-terminator */;
+    }
+    header->string_ids_size_ = strings_.size();
+    header->string_ids_off_ = strings_.empty() ? 0u : string_ids_offset;
+
+    uint32_t type_ids_offset = string_ids_offset + strings_.size() * sizeof(DexFile::StringId);
+    uint32_t type_idx = 0u;
+    for (auto& entry : types_) {
+      entry.second = type_idx;
+      type_idx += 1u;
+    }
+    header->type_ids_size_ = types_.size();
+    header->type_ids_off_ = types_.empty() ? 0u : type_ids_offset;
+
+    uint32_t proto_ids_offset = type_ids_offset + types_.size() * sizeof(DexFile::TypeId);
+    uint32_t proto_idx = 0u;
+    for (auto& entry : protos_) {
+      entry.second.idx = proto_idx;
+      proto_idx += 1u;
+      size_t num_args = entry.first.args.size();
+      if (num_args != 0u) {
+        entry.second.data_offset = RoundUp(data_section_size, 4u);
+        data_section_size = entry.second.data_offset + 4u + num_args * sizeof(DexFile::TypeItem);
+      } else {
+        entry.second.data_offset = 0u;
+      }
+    }
+    header->proto_ids_size_ = protos_.size();
+    header->proto_ids_off_ = protos_.empty() ? 0u : proto_ids_offset;
+
+    uint32_t field_ids_offset = proto_ids_offset + protos_.size() * sizeof(DexFile::ProtoId);
+    uint32_t field_idx = 0u;
+    for (auto& entry : fields_) {
+      entry.second = field_idx;
+      field_idx += 1u;
+    }
+    header->field_ids_size_ = fields_.size();
+    header->field_ids_off_ = fields_.empty() ? 0u : field_ids_offset;
+
+    uint32_t method_ids_offset = field_ids_offset + fields_.size() * sizeof(DexFile::FieldId);
+    uint32_t method_idx = 0u;
+    for (auto& entry : methods_) {
+      entry.second = method_idx;
+      method_idx += 1u;
+    }
+    header->method_ids_size_ = methods_.size();
+    header->method_ids_off_ = methods_.empty() ? 0u : method_ids_offset;
+
+    // No class defs.
+    header->class_defs_size_ = 0u;
+    header->class_defs_off_ = 0u;
+
+    uint32_t data_section_offset = method_ids_offset + methods_.size() * sizeof(DexFile::MethodId);
+    header->data_size_ = data_section_size;
+    header->data_off_ = (data_section_size != 0u) ? data_section_offset : 0u;
+
+    uint32_t total_size = data_section_offset + data_section_size;
+
+    dex_file_data_.resize(total_size);
+    std::memcpy(&dex_file_data_[0], header_data.data, sizeof(DexFile::Header));
+
+    for (const auto& entry : strings_) {
+      CHECK_LT(entry.first.size(), 128u);
+      uint32_t raw_offset = data_section_offset + entry.second.data_offset;
+      dex_file_data_[raw_offset] = static_cast<uint8_t>(entry.first.size());
+      std::memcpy(&dex_file_data_[raw_offset + 1], entry.first.c_str(), entry.first.size() + 1);
+      Write32(string_ids_offset + entry.second.idx * sizeof(DexFile::StringId), raw_offset);
+    }
+
+    for (const auto& entry : types_) {
+      Write32(type_ids_offset + entry.second * sizeof(DexFile::TypeId), GetStringIdx(entry.first));
+      ++type_idx;
+    }
+
+    for (const auto& entry : protos_) {
+      size_t num_args = entry.first.args.size();
+      uint32_t type_list_offset =
+          (num_args != 0u) ? data_section_offset + entry.second.data_offset : 0u;
+      uint32_t raw_offset = proto_ids_offset + entry.second.idx * sizeof(DexFile::ProtoId);
+      Write32(raw_offset + 0u, GetStringIdx(entry.first.shorty));
+      Write16(raw_offset + 4u, GetTypeIdx(entry.first.return_type));
+      Write32(raw_offset + 8u, type_list_offset);
+      if (num_args != 0u) {
+        CHECK_NE(entry.second.data_offset, 0u);
+        Write32(type_list_offset, num_args);
+        for (size_t i = 0; i != num_args; ++i) {
+          Write16(type_list_offset + 4u + i * sizeof(DexFile::TypeItem),
+                  GetTypeIdx(entry.first.args[i]));
+        }
+      }
+    }
+
+    for (const auto& entry : fields_) {
+      uint32_t raw_offset = field_ids_offset + entry.second * sizeof(DexFile::FieldId);
+      Write16(raw_offset + 0u, GetTypeIdx(entry.first.class_descriptor));
+      Write16(raw_offset + 2u, GetTypeIdx(entry.first.type));
+      Write32(raw_offset + 4u, GetStringIdx(entry.first.name));
+    }
+
+    for (const auto& entry : methods_) {
+      uint32_t raw_offset = method_ids_offset + entry.second * sizeof(DexFile::MethodId);
+      Write16(raw_offset + 0u, GetTypeIdx(entry.first.class_descriptor));
+      auto it = protos_.find(*entry.first.proto);
+      CHECK(it != protos_.end());
+      Write16(raw_offset + 2u, it->second.idx);
+      Write32(raw_offset + 4u, GetStringIdx(entry.first.name));
+    }
+
+    // Leave checksum and signature as zeros.
+
+    std::string error_msg;
+    std::unique_ptr<const DexFile> dex_file(DexFile::Open(
+        &dex_file_data_[0], dex_file_data_.size(), dex_location, 0u, nullptr, &error_msg));
+    CHECK(dex_file != nullptr) << error_msg;
+    return std::move(dex_file);
+  }
+
+  uint32_t GetStringIdx(const std::string& type) {
+    auto it = strings_.find(type);
+    CHECK(it != strings_.end());
+    return it->second.idx;
+  }
+
+  uint32_t GetTypeIdx(const std::string& type) {
+    auto it = types_.find(type);
+    CHECK(it != types_.end());
+    return it->second;
+  }
+
+  uint32_t GetFieldIdx(const std::string& class_descriptor, const std::string& type,
+                       const std::string& name) {
+    FieldKey key = { class_descriptor, type, name };
+    auto it = fields_.find(key);
+    CHECK(it != fields_.end());
+    return it->second;
+  }
+
+  uint32_t GetMethodIdx(const std::string& class_descriptor, const std::string& signature,
+                        const std::string& name) {
+    ProtoKey proto_key = CreateProtoKey(signature);
+    MethodKey method_key = { class_descriptor, name, &proto_key };
+    auto it = methods_.find(method_key);
+    CHECK(it != methods_.end());
+    return it->second;
+  }
+
+ private:
+  struct IdxAndDataOffset {
+    uint32_t idx;
+    uint32_t data_offset;
+  };
+
+  struct FieldKey {
+    const std::string class_descriptor;
+    const std::string type;
+    const std::string name;
+  };
+  struct FieldKeyComparator {
+    bool operator()(const FieldKey& lhs, const FieldKey& rhs) const {
+      if (lhs.class_descriptor != rhs.class_descriptor) {
+        return lhs.class_descriptor < rhs.class_descriptor;
+      }
+      if (lhs.name != rhs.name) {
+        return lhs.name < rhs.name;
+      }
+      return lhs.type < rhs.type;
+    }
+  };
+
+  struct ProtoKey {
+    std::string shorty;
+    std::string return_type;
+    std::vector<std::string> args;
+  };
+  struct ProtoKeyComparator {
+    bool operator()(const ProtoKey& lhs, const ProtoKey& rhs) const {
+      if (lhs.return_type != rhs.return_type) {
+        return lhs.return_type < rhs.return_type;
+      }
+      size_t min_args = std::min(lhs.args.size(), rhs.args.size());
+      for (size_t i = 0; i != min_args; ++i) {
+        if (lhs.args[i] != rhs.args[i]) {
+          return lhs.args[i] < rhs.args[i];
+        }
+      }
+      return lhs.args.size() < rhs.args.size();
+    }
+  };
+
+  struct MethodKey {
+    std::string class_descriptor;
+    std::string name;
+    const ProtoKey* proto;
+  };
+  struct MethodKeyComparator {
+    bool operator()(const MethodKey& lhs, const MethodKey& rhs) const {
+      if (lhs.class_descriptor != rhs.class_descriptor) {
+        return lhs.class_descriptor < rhs.class_descriptor;
+      }
+      if (lhs.name != rhs.name) {
+        return lhs.name < rhs.name;
+      }
+      return ProtoKeyComparator()(*lhs.proto, *rhs.proto);
+    }
+  };
+
+  ProtoKey CreateProtoKey(const std::string& signature) {
+    CHECK_EQ(signature[0], '(');
+    const char* args = signature.c_str() + 1;
+    const char* args_end = std::strchr(args, ')');
+    CHECK(args_end != nullptr);
+    const char* return_type = args_end + 1;
+
+    ProtoKey key = {
+        std::string() + ((*return_type == '[') ? 'L' : *return_type),
+        return_type,
+        std::vector<std::string>()
+    };
+    while (args != args_end) {
+      key.shorty += (*args == '[') ? 'L' : *args;
+      const char* arg_start = args;
+      while (*args == '[') {
+        ++args;
+      }
+      if (*args == 'L') {
+        do {
+          ++args;
+          CHECK_NE(args, args_end);
+        } while (*args != ';');
+      }
+      ++args;
+      key.args.emplace_back(arg_start, args);
+    }
+    return key;
+  }
+
+  void Write32(size_t offset, uint32_t value) {
+    CHECK_LE(offset + 4u, dex_file_data_.size());
+    CHECK_EQ(dex_file_data_[offset + 0], 0u);
+    CHECK_EQ(dex_file_data_[offset + 1], 0u);
+    CHECK_EQ(dex_file_data_[offset + 2], 0u);
+    CHECK_EQ(dex_file_data_[offset + 3], 0u);
+    dex_file_data_[offset + 0] = static_cast<uint8_t>(value >> 0);
+    dex_file_data_[offset + 1] = static_cast<uint8_t>(value >> 8);
+    dex_file_data_[offset + 2] = static_cast<uint8_t>(value >> 16);
+    dex_file_data_[offset + 3] = static_cast<uint8_t>(value >> 24);
+  }
+
+  void Write16(size_t offset, uint32_t value) {
+    CHECK_LE(value, 0xffffu);
+    CHECK_LE(offset + 2u, dex_file_data_.size());
+    CHECK_EQ(dex_file_data_[offset + 0], 0u);
+    CHECK_EQ(dex_file_data_[offset + 1], 0u);
+    dex_file_data_[offset + 0] = static_cast<uint8_t>(value >> 0);
+    dex_file_data_[offset + 1] = static_cast<uint8_t>(value >> 8);
+  }
+
+  std::map<std::string, IdxAndDataOffset> strings_;
+  std::map<std::string, uint32_t> types_;
+  std::map<FieldKey, uint32_t, FieldKeyComparator> fields_;
+  std::map<ProtoKey, IdxAndDataOffset, ProtoKeyComparator> protos_;
+  std::map<MethodKey, uint32_t, MethodKeyComparator> methods_;
+
+  std::vector<uint8_t> dex_file_data_;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
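
For reference, the shorty strings the builder registers (such as the "LLL" checked in the test below) encode the return type first and then one character per argument, with class and array types both collapsing to 'L'. A standalone sketch of the mapping applied by CreateProtoKey above (hypothetical helper name):

    #include <cassert>
    #include <string>

    std::string Shorty(const std::string& signature) {
      std::size_t close = signature.find(')');
      char ret = signature[close + 1];
      std::string shorty(1, ret == '[' ? 'L' : ret);  // Return type first.
      for (std::size_t i = 1; i < close; ++i) {
        shorty += (signature[i] == '[') ? 'L' : signature[i];
        while (signature[i] == '[') {
          ++i;  // Skip array dimensions.
        }
        if (signature[i] == 'L') {
          i = signature.find(';', i);  // Skip to the end of the class name.
        }
      }
      return shorty;
    }

    int main() {
      assert(Shorty("(Ljava/lang/Object;[Ljava/lang/Object;)LTestClass;") == "LLL");
      assert(Shorty("()I") == "I");
      assert(Shorty("([[ILjava/lang/String;)V") == "VLL");
      return 0;
    }
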
diff --git a/compiler/utils/test_dex_file_builder_test.cc b/compiler/utils/test_dex_file_builder_test.cc
new file mode 100644
index 0000000..ee6e35d
--- /dev/null
+++ b/compiler/utils/test_dex_file_builder_test.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "test_dex_file_builder.h"
+
+#include "dex_file-inl.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(TestDexFileBuilderTest, SimpleTest) {
+  TestDexFileBuilder builder;
+  builder.AddString("Arbitrary string");
+  builder.AddType("Ljava/lang/Class;");
+  builder.AddField("LTestClass;", "[I", "intField");
+  builder.AddMethod("LTestClass;", "()I", "foo");
+  builder.AddMethod("LTestClass;", "(Ljava/lang/Object;[Ljava/lang/Object;)LTestClass;", "bar");
+  const char* dex_location = "TestDexFileBuilder/SimpleTest";
+  std::unique_ptr<const DexFile> dex_file(builder.Build(dex_location));
+  ASSERT_TRUE(dex_file != nullptr);
+  EXPECT_STREQ(dex_location, dex_file->GetLocation().c_str());
+
+  static const char* const expected_strings[] = {
+      "Arbitrary string",
+      "I",
+      "LLL",  // shorty
+      "LTestClass;",
+      "Ljava/lang/Class;",
+      "Ljava/lang/Object;",
+      "[I",
+      "[Ljava/lang/Object;",
+      "bar",
+      "foo",
+      "intField",
+  };
+  ASSERT_EQ(arraysize(expected_strings), dex_file->NumStringIds());
+  for (size_t i = 0; i != arraysize(expected_strings); ++i) {
+    EXPECT_STREQ(expected_strings[i], dex_file->GetStringData(dex_file->GetStringId(i))) << i;
+  }
+
+  static const char* const expected_types[] = {
+      "I",
+      "LTestClass;",
+      "Ljava/lang/Class;",
+      "Ljava/lang/Object;",
+      "[I",
+      "[Ljava/lang/Object;",
+  };
+  ASSERT_EQ(arraysize(expected_types), dex_file->NumTypeIds());
+  for (size_t i = 0; i != arraysize(expected_types); ++i) {
+    EXPECT_STREQ(expected_types[i], dex_file->GetTypeDescriptor(dex_file->GetTypeId(i))) << i;
+  }
+
+  ASSERT_EQ(1u, dex_file->NumFieldIds());
+  EXPECT_STREQ("[I TestClass.intField", PrettyField(0u, *dex_file).c_str());
+
+  ASSERT_EQ(2u, dex_file->NumProtoIds());
+  ASSERT_EQ(2u, dex_file->NumMethodIds());
+  EXPECT_STREQ("TestClass TestClass.bar(java.lang.Object, java.lang.Object[])",
+               PrettyMethod(0u, *dex_file).c_str());
+  EXPECT_STREQ("int TestClass.foo()",
+               PrettyMethod(1u, *dex_file).c_str());
+
+  EXPECT_EQ(0u, builder.GetStringIdx("Arbitrary string"));
+  EXPECT_EQ(2u, builder.GetTypeIdx("Ljava/lang/Class;"));
+  EXPECT_EQ(0u, builder.GetFieldIdx("LTestClass;", "[I", "intField"));
+  EXPECT_EQ(1u, builder.GetMethodIdx("LTestClass;", "()I", "foo"));
+  EXPECT_EQ(0u, builder.GetMethodIdx("LTestClass;", "(Ljava/lang/Object;[Ljava/lang/Object;)LTestClass;", "bar"));
+}
+
+}  // namespace art
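The orderings asserted above follow from the dex format: string_ids must be sorted by UTF-16 code point and type_ids by descriptor, which is why "Arbitrary string" (uppercase 'A', 0x41) precedes "I" and "LLL" precedes "LTestClass;". The builder inherits this for free from std::map iteration order. A small illustration (hypothetical code, assuming ASCII descriptors, where byte order and UTF-16 order coincide):

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

int main() {
  // std::map orders keys with operator<, which for std::string is
  // byte-wise - the same order the dex format requires here.
  std::map<std::string, uint32_t> strings;
  for (const char* s : {"foo", "LTestClass;", "Arbitrary string", "LLL"}) {
    strings.emplace(s, 0u);
  }
  uint32_t idx = 0;
  for (auto& entry : strings) {
    entry.second = idx++;  // Assign indices in sorted order.
    std::cout << entry.second << ": " << entry.first << "\n";
  }
  // Prints: 0: Arbitrary string, 1: LLL, 2: LTestClass;, 3: foo.
}
```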
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index a933474..7fc8ef0 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -576,17 +576,17 @@
   void GetCurrentThread(ManagedRegister tr) OVERRIDE;
   void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
-  // NULL.
-  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
-                       bool null_allowed) OVERRIDE;
+  // null.
+  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
-                       bool null_allowed) OVERRIDE;
+  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+                              ManagedRegister scratch, bool null_allowed) OVERRIDE;
 
   // src holds a handle scope entry (Object**) load this into dst
   void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
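Both CreateHandleScopeEntry() overloads above implement the JNI handle-scope convention: the output is either a pointer into the handle scope (an Object**) or null when the stored reference is null and null is allowed, so the callee receives a real NULL jobject; in_reg is only a hint that may spare a load of the entry. The emitted code computes roughly the following sketch (illustrative C++, not the assembler's API):

```cpp
#include <cstdint>

using Object = void;  // Stand-in for mirror::Object.

// Return a pointer to the handle scope entry, or nullptr when the stored
// reference is null and null is allowed.
Object** CreateHandleScopeEntry(Object** handle_scope_entry,
                                Object* maybe_stale_ref,  // Optional hint.
                                bool null_allowed) {
  if (null_allowed) {
    // The hint spares a load when the caller already holds the value;
    // otherwise the entry itself is loaded to test for null.
    Object* ref = (maybe_stale_ref != nullptr) ? maybe_stale_ref
                                               : *handle_scope_entry;
    return (ref == nullptr) ? nullptr : handle_scope_entry;
  }
  return handle_scope_entry;
}
```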
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 0344f52..c0ca7ef 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2751,7 +2751,7 @@
   X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
   X86_64ManagedRegister in_reg = min_reg.AsX86_64();
   if (in_reg.IsNoRegister()) {  // TODO(64): && null_allowed
-    // Use out_reg as indicator of NULL
+    // Use out_reg as indicator of null.
     in_reg = out_reg;
     // TODO: movzwl
     movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 79ad8f5..f5327a8 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -711,17 +711,17 @@
   void GetCurrentThread(ManagedRegister tr) OVERRIDE;
   void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
-  // NULL.
-  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
-                       bool null_allowed) OVERRIDE;
+  // null.
+  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
-                       bool null_allowed) OVERRIDE;
+  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+                              ManagedRegister scratch, bool null_allowed) OVERRIDE;
 
   // src holds a handle scope entry (Object**) load this into dst
   virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
diff --git a/dalvikvm/dalvikvm.cc b/dalvikvm/dalvikvm.cc
index fd03002..85debe4 100644
--- a/dalvikvm/dalvikvm.cc
+++ b/dalvikvm/dalvikvm.cc
@@ -31,19 +31,19 @@
 // Determine whether or not the specified method is public.
 static bool IsMethodPublic(JNIEnv* env, jclass c, jmethodID method_id) {
   ScopedLocalRef<jobject> reflected(env, env->ToReflectedMethod(c, method_id, JNI_FALSE));
-  if (reflected.get() == NULL) {
+  if (reflected.get() == nullptr) {
     fprintf(stderr, "Failed to get reflected method\n");
     return false;
   }
   // We now have a Method instance.  We need to call its
   // getModifiers() method.
   jclass method_class = env->FindClass("java/lang/reflect/Method");
-  if (method_class == NULL) {
+  if (method_class == nullptr) {
     fprintf(stderr, "Failed to find class java.lang.reflect.Method\n");
     return false;
   }
   jmethodID mid = env->GetMethodID(method_class, "getModifiers", "()I");
-  if (mid == NULL) {
+  if (mid == nullptr) {
     fprintf(stderr, "Failed to find java.lang.reflect.Method.getModifiers\n");
     return false;
   }
@@ -61,7 +61,7 @@
   // it.  Create an array and populate it.  Note argv[0] is not
   // included.
   ScopedLocalRef<jobjectArray> args(env, toStringArray(env, argv + 1));
-  if (args.get() == NULL) {
+  if (args.get() == nullptr) {
     env->ExceptionDescribe();
     return EXIT_FAILURE;
   }
@@ -73,14 +73,14 @@
   std::replace(class_name.begin(), class_name.end(), '.', '/');
 
   ScopedLocalRef<jclass> klass(env, env->FindClass(class_name.c_str()));
-  if (klass.get() == NULL) {
+  if (klass.get() == nullptr) {
     fprintf(stderr, "Unable to locate class '%s'\n", class_name.c_str());
     env->ExceptionDescribe();
     return EXIT_FAILURE;
   }
 
   jmethodID method = env->GetStaticMethodID(klass.get(), "main", "([Ljava/lang/String;)V");
-  if (method == NULL) {
+  if (method == nullptr) {
     fprintf(stderr, "Unable to find static main(String[]) in '%s'\n", class_name.c_str());
     env->ExceptionDescribe();
     return EXIT_FAILURE;
@@ -106,7 +106,7 @@
 // Parse arguments.  Most of it just gets passed through to the runtime.
 // The JNI spec defines a handful of standard arguments.
 static int dalvikvm(int argc, char** argv) {
-  setvbuf(stdout, NULL, _IONBF, 0);
+  setvbuf(stdout, nullptr, _IONBF, 0);
 
   // Skip over argv[0].
   argv++;
@@ -125,8 +125,8 @@
   //
   // [Do we need to catch & handle "-jar" here?]
   bool need_extra = false;
-  const char* lib = NULL;
-  const char* what = NULL;
+  const char* lib = nullptr;
+  const char* what = nullptr;
   int curr_opt, arg_idx;
   for (curr_opt = arg_idx = 0; arg_idx < argc; arg_idx++) {
     if (argv[arg_idx][0] != '-' && !need_extra) {
@@ -172,8 +172,8 @@
   init_args.ignoreUnrecognized = JNI_FALSE;
 
   // Start the runtime. The current thread becomes the main thread.
-  JavaVM* vm = NULL;
-  JNIEnv* env = NULL;
+  JavaVM* vm = nullptr;
+  JNIEnv* env = nullptr;
   if (JNI_CreateJavaVM(&vm, &env, &init_args) != JNI_OK) {
     fprintf(stderr, "Failed to initialize runtime (check log for details)\n");
     return EXIT_FAILURE;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 9c01a0f..2a3a346 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1518,7 +1518,7 @@
   static size_t OpenDexFiles(const std::vector<const char*>& dex_filenames,
                              const std::vector<const char*>& dex_locations,
                              std::vector<std::unique_ptr<const DexFile>>* dex_files) {
-    DCHECK(dex_files != nullptr) << "OpenDexFiles out-param is NULL";
+    DCHECK(dex_files != nullptr) << "OpenDexFiles out-param is nullptr";
     size_t failure_count = 0;
     for (size_t i = 0; i < dex_filenames.size(); i++) {
       const char* dex_filename = dex_filenames[i];
@@ -1559,7 +1559,7 @@
   static void OpenClassPathFiles(const std::string& class_path,
                                  std::vector<const DexFile*> dex_files,
                                  std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
-    DCHECK(opened_dex_files != nullptr) << "OpenClassPathFiles out-param is NULL";
+    DCHECK(opened_dex_files != nullptr) << "OpenClassPathFiles out-param is nullptr";
     std::vector<std::string> parsed;
     Split(class_path, ':', &parsed);
     // Take Locks::mutator_lock_ so that lock ordering on the ClassLinker::dex_lock_ is maintained.
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index c05c3ed..6334717 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -42,7 +42,7 @@
     return new x86::DisassemblerX86(options, true);
   } else {
     UNIMPLEMENTED(FATAL) << "no disassembler for " << instruction_set;
-    return NULL;
+    return nullptr;
   }
 }
 
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 34a4c14..1056fe1 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -97,7 +97,8 @@
 
     {
       struct stat sts;
-      std::string proc_pid_str = StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid));  // NOLINT [runtime/int]
+      std::string proc_pid_str =
+          StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid));  // NOLINT [runtime/int]
       if (stat(proc_pid_str.c_str(), &sts) == -1) {
         os << "Process does not exist";
         return false;
@@ -144,7 +145,8 @@
     const size_t pointer_size = InstructionSetPointerSize(
         Runtime::Current()->GetInstructionSet());
 
-    std::string file_name = StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid));  // NOLINT [runtime/int]
+    std::string file_name =
+        StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid));  // NOLINT [runtime/int]
 
     size_t boot_map_size = boot_map.end - boot_map.start;
 
@@ -197,8 +199,8 @@
       return false;
     }
 
-    std::string page_map_file_name = StringPrintf("/proc/%ld/pagemap",
-                                                  static_cast<long>(image_diff_pid));  // NOLINT [runtime/int]
+    std::string page_map_file_name = StringPrintf(
+        "/proc/%ld/pagemap", static_cast<long>(image_diff_pid));  // NOLINT [runtime/int]
     auto page_map_file = std::unique_ptr<File>(OS::OpenFileForReading(page_map_file_name.c_str()));
     if (page_map_file == nullptr) {
       os << "Failed to open " << page_map_file_name << " for reading: " << strerror(errno);
@@ -226,8 +228,10 @@
       return false;
     }
 
-    std::set<size_t> dirty_page_set_remote;  // Set of the remote virtual page indices that are dirty
-    std::set<size_t> dirty_page_set_local;   // Set of the local virtual page indices that are dirty
+    // Set of the remote virtual page indices that are dirty
+    std::set<size_t> dirty_page_set_remote;
+    // Set of the local virtual page indices that are dirty
+    std::set<size_t> dirty_page_set_local;
 
     size_t different_int32s = 0;
     size_t different_bytes = 0;
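imgdiag compares the local boot image against a remote process by reading /proc/&lt;pid&gt;/mem, and consults /proc/&lt;pid&gt;/pagemap to reason about page state. pagemap holds one 64-bit entry per virtual page, so the entry for an address lives at byte offset (vaddr / page_size) * 8, with bit 63 indicating the page is present. A minimal sketch of that lookup (error handling trimmed, names illustrative):

```cpp
#include <cstdint>
#include <cstdio>
#include <sys/types.h>
#include <unistd.h>

// Read the pagemap entry for one virtual address of another process.
// Entry layout (Linux): bit 63 = present, bit 62 = swapped,
// bits 0-54 = page frame number when present.
bool ReadPagemapEntry(pid_t pid, uintptr_t vaddr, uint64_t* entry) {
  char path[64];
  std::snprintf(path, sizeof(path), "/proc/%ld/pagemap", static_cast<long>(pid));  // NOLINT [runtime/int]
  std::FILE* f = std::fopen(path, "rb");
  if (f == nullptr) {
    return false;
  }
  long page_size = sysconf(_SC_PAGESIZE);
  uint64_t offset = (vaddr / static_cast<uint64_t>(page_size)) * 8u;
  bool ok = std::fseek(f, static_cast<long>(offset), SEEK_SET) == 0 &&
            std::fread(entry, sizeof(*entry), 1, f) == 1;
  std::fclose(f);
  return ok;
}

// Usage: uint64_t e; if (ReadPagemapEntry(pid, addr, &e) && (e >> 63)) ...
```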
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index d6d8808..f2e35af 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -144,7 +144,7 @@
     std::vector<const OatFile::OatDexFile*> oat_dex_files = oat_file_->GetOatDexFiles();
     for (size_t i = 0; i < oat_dex_files.size(); i++) {
       const OatFile::OatDexFile* oat_dex_file = oat_dex_files[i];
-      CHECK(oat_dex_file != NULL);
+      CHECK(oat_dex_file != nullptr);
       WalkOatDexFile(oat_dex_file, callback);
     }
   }
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index 5bdeda7..a58aecb 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -80,7 +80,7 @@
   NO_RETURN void DoLongJump() OVERRIDE;
 
  private:
-  // Pointers to register locations, initialized to NULL or the specific registers below.
+  // Pointers to register locations, initialized to null or the specific registers below.
   uintptr_t* gprs_[kNumberOfCoreRegisters];
   uint32_t* fprs_[kNumberOfSRegisters];
   // Hold values for sp and pc if they are not located within a stack frame.
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 055b5ab..f14dfc2 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -143,11 +143,16 @@
   qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
   qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
   qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
-  qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
-  qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
-  qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
-  qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
-  qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+  qpoints->pInvokeDirectTrampolineWithAccessCheck =
+      art_quick_invoke_direct_trampoline_with_access_check;
+  qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+      art_quick_invoke_interface_trampoline_with_access_check;
+  qpoints->pInvokeStaticTrampolineWithAccessCheck =
+      art_quick_invoke_static_trampoline_with_access_check;
+  qpoints->pInvokeSuperTrampolineWithAccessCheck =
+      art_quick_invoke_super_trampoline_with_access_check;
+  qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+      art_quick_invoke_virtual_trampoline_with_access_check;
 
   // Thread
   qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 3e8b367..d84cb53 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -56,7 +56,7 @@
   struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
   struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
   Thread* self = Thread::Current();
-  CHECK(self != nullptr);       // This will cause a SIGABRT if self is nullptr.
+  CHECK(self != nullptr);  // This will cause a SIGABRT if self is null.
 
   sc->arm_r0 = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
   sc->arm_r1 = 1;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 9bd8ba7..8f6162f 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -320,7 +320,7 @@
      * The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
      * of the target Method* in r0 and method->code_ in r1.
      *
-     * If unsuccessful, the helper will return NULL/NULL. There will bea pending exception in the
+     * If unsuccessful, the helper will return null/null. There will be a pending exception in the
      * thread and we branch to another stub to deliver it.
      *
      * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -359,7 +359,7 @@
      * Quick invocation stub internal.
      * On entry:
      *   r0 = method pointer
-     *   r1 = argument array or NULL for no argument methods
+     *   r1 = argument array or null for no argument methods
      *   r2 = size of argument array in bytes
      *   r3 = (managed) thread pointer
      *   [sp] = JValue* result
@@ -409,7 +409,7 @@
     add    r0, sp, #4                      @ pass stack pointer + method ptr as dest for memcpy
     bl     memcpy                          @ memcpy (dest, src, bytes)
     mov    ip, #0                          @ set ip to 0
-    str    ip, [sp]                        @ store NULL for method* at bottom of frame
+    str    ip, [sp]                        @ store null for method* at bottom of frame
 
     ldr    ip, [r11, #48]                  @ load fp register argument array pointer
     vldm   ip, {s0-s15}                    @ copy s0 - s15
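The invocation stub documented above is the bridge from native code into a managed method: it receives the method pointer, a packed argument array, the array size in bytes, the managed thread, and a result slot, then builds a managed frame whose bottom word is the null method* marker stored by `str ip, [sp]` above. Its C-visible contract looks roughly like the sketch below; the trailing shorty parameter and the exact declaration are assumptions, the real one lives in ART's runtime sources:

```cpp
#include <cstdint>

// Sketch only: stand-in forward declarations, not ART's real headers.
namespace art {
class Thread;
union JValue;
namespace mirror { class ArtMethod; }

// Assembly entrypoint (see quick_entrypoints_arm.S above):
//   r0 = method, r1 = argument array or null, r2 = size in bytes,
//   r3 = managed Thread*, [sp] = JValue* result slot,
//   shorty assumed to follow for interpreting the result.
extern "C" void art_quick_invoke_stub(mirror::ArtMethod* method,
                                      uint32_t* args,
                                      uint32_t args_size,
                                      Thread* self,
                                      JValue* result,
                                      const char* shorty);
}  // namespace art
```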
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index f486779..0383ad6 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -80,7 +80,7 @@
   NO_RETURN void DoLongJump() OVERRIDE;
 
  private:
-  // Pointers to register locations, initialized to NULL or the specific registers below.
+  // Pointers to register locations, initialized to null or the specific registers below.
   uintptr_t* gprs_[kNumberOfXRegisters];
   uint64_t * fprs_[kNumberOfDRegisters];
   // Hold values for sp and pc if they are not located within a stack frame.
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 6c787e3..4b12f00 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -105,7 +105,7 @@
   qpoints->pUnlockObject = art_quick_unlock_object;
 
   // Math
-  // TODO nullptr entrypoints not needed for ARM64 - generate inline.
+  // TODO null entrypoints not needed for ARM64 - generate inline.
   qpoints->pCmpgDouble = nullptr;
   qpoints->pCmpgFloat = nullptr;
   qpoints->pCmplDouble = nullptr;
@@ -135,11 +135,16 @@
   qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
   qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
   qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
-  qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
-  qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
-  qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
-  qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
-  qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+  qpoints->pInvokeDirectTrampolineWithAccessCheck =
+      art_quick_invoke_direct_trampoline_with_access_check;
+  qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+      art_quick_invoke_interface_trampoline_with_access_check;
+  qpoints->pInvokeStaticTrampolineWithAccessCheck =
+      art_quick_invoke_static_trampoline_with_access_check;
+  qpoints->pInvokeSuperTrampolineWithAccessCheck =
+      art_quick_invoke_super_trampoline_with_access_check;
+  qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+      art_quick_invoke_virtual_trampoline_with_access_check;
 
   // Thread
   qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index c914d85..0448c76 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -45,7 +45,7 @@
   struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
   struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
   Thread* self = Thread::Current();
-  CHECK(self != nullptr);       // This will cause a SIGABRT if self is nullptr.
+  CHECK(self != nullptr);       // This will cause a SIGABRT if self is null.
 
   sc->regs[0] = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
   sc->regs[1] = 1;
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 4079436..cbd4b7c 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -466,7 +466,7 @@
      * The helper will attempt to locate the target and return a 128-bit result in x0/x1 consisting
      * of the target Method* in x0 and method->code_ in x1.
      *
-     * If unsuccessful, the helper will return NULL/????. There will be a pending exception in the
+     * If unsuccessful, the helper will return null/????. There will be a pending exception in the
      * thread and we branch to another stub to deliver it.
      *
      * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -565,7 +565,7 @@
     // W2 - args length
     // X9 - destination address.
     // W10 - temporary
-    add x9, sp, #4                         // Destination address is bottom of stack + NULL.
+    add x9, sp, #4                         // Destination address is bottom of stack + space for the null method*.
 
     // Use \@ to differentiate between macro invocations.
 .LcopyParams\@:
@@ -579,7 +579,7 @@
 
 .LendCopyParams\@:
 
-    // Store NULL into StackReference<Method>* at bottom of frame.
+    // Store null into StackReference<Method>* at bottom of frame.
     str wzr, [sp]
 
 #if (STACK_REFERENCE_SIZE != 4)
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index cbad3f963..d01b95e 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -79,7 +79,7 @@
   NO_RETURN void DoLongJump() OVERRIDE;
 
  private:
-  // Pointers to registers in the stack, initialized to NULL except for the special cases below.
+  // Pointers to registers in the stack, initialized to null except for the special cases below.
   uintptr_t* gprs_[kNumberOfCoreRegisters];
   uint32_t* fprs_[kNumberOfFRegisters];
   // Hold values for sp and ra (return address) if they are not located within a stack frame.
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index e3ec27c..a980a86 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -199,7 +199,7 @@
   static_assert(IsDirectEntrypoint(kQuickD2iz), "Direct C stub not marked direct.");
   qpoints->pF2iz = art_f2i;
   static_assert(IsDirectEntrypoint(kQuickF2iz), "Direct C stub not marked direct.");
-  qpoints->pIdivmod = NULL;
+  qpoints->pIdivmod = nullptr;
   qpoints->pD2l = art_d2l;
   static_assert(IsDirectEntrypoint(kQuickD2l), "Direct C stub not marked direct.");
   qpoints->pF2l = art_f2l;
@@ -228,19 +228,24 @@
   qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
   qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
   qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
-  qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+  qpoints->pInvokeDirectTrampolineWithAccessCheck =
+      art_quick_invoke_direct_trampoline_with_access_check;
   static_assert(!IsDirectEntrypoint(kQuickInvokeDirectTrampolineWithAccessCheck),
                 "Non-direct C stub marked direct.");
-  qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+  qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+      art_quick_invoke_interface_trampoline_with_access_check;
   static_assert(!IsDirectEntrypoint(kQuickInvokeInterfaceTrampolineWithAccessCheck),
                 "Non-direct C stub marked direct.");
-  qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+  qpoints->pInvokeStaticTrampolineWithAccessCheck =
+      art_quick_invoke_static_trampoline_with_access_check;
   static_assert(!IsDirectEntrypoint(kQuickInvokeStaticTrampolineWithAccessCheck),
                 "Non-direct C stub marked direct.");
-  qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+  qpoints->pInvokeSuperTrampolineWithAccessCheck =
+      art_quick_invoke_super_trampoline_with_access_check;
   static_assert(!IsDirectEntrypoint(kQuickInvokeSuperTrampolineWithAccessCheck),
                 "Non-direct C stub marked direct.");
-  qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+  qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+      art_quick_invoke_virtual_trampoline_with_access_check;
   static_assert(!IsDirectEntrypoint(kQuickInvokeVirtualTrampolineWithAccessCheck),
                 "Non-direct C stub marked direct.");
 
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 0c2250e..622c48f 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -446,7 +446,7 @@
      * The helper will attempt to locate the target and return a 64-bit result in $v0/$v1 consisting
      * of the target Method* in $v0 and method->code_ in $v1.
      *
-     * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+     * If unsuccessful, the helper will return null/null. There will be a pending exception in the
      * thread and we branch to another stub to deliver it.
      *
      * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -484,7 +484,7 @@
      * Invocation stub for quick code.
      * On entry:
      *   a0 = method pointer
-     *   a1 = argument array or NULL for no argument methods
+     *   a1 = argument array or null for no argument methods
      *   a2 = size of argument array in bytes
      *   a3 = (managed) thread pointer
      *   [sp + 16] = JValue* result
@@ -520,7 +520,7 @@
     lw    $a3, 12($sp)          # copy arg value for a3
     lw    $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32($a0)  # get pointer to the code
     jalr  $t9                   # call the method
-    sw    $zero, 0($sp)         # store NULL for method* at bottom of frame
+    sw    $zero, 0($sp)         # store null for method* at bottom of frame
     move  $sp, $fp              # restore the stack
     lw    $s0, 0($sp)
     .cfi_restore 16
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
index 2cc2b8d..ebc036c 100644
--- a/runtime/arch/mips64/context_mips64.h
+++ b/runtime/arch/mips64/context_mips64.h
@@ -79,7 +79,7 @@
   NO_RETURN void DoLongJump() OVERRIDE;
 
  private:
-  // Pointers to registers in the stack, initialized to NULL except for the special cases below.
+  // Pointers to registers in the stack, initialized to null except for the special cases below.
   uintptr_t* gprs_[kNumberOfGpuRegisters];
   uint64_t* fprs_[kNumberOfFpuRegisters];
   // Hold values for sp and ra (return address) if they are not located within a stack frame.
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 4a3bf02..b328708 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -135,15 +135,15 @@
   qpoints->pL2f = art_l2f;
   qpoints->pD2iz = art_d2i;
   qpoints->pF2iz = art_f2i;
-  qpoints->pIdivmod = NULL;
+  qpoints->pIdivmod = nullptr;
   qpoints->pD2l = art_d2l;
   qpoints->pF2l = art_f2l;
   qpoints->pLdiv = artLdiv;
   qpoints->pLmod = artLmod;
   qpoints->pLmul = artLmul;
-  qpoints->pShlLong = NULL;
-  qpoints->pShrLong = NULL;
-  qpoints->pUshrLong = NULL;
+  qpoints->pShlLong = nullptr;
+  qpoints->pShrLong = nullptr;
+  qpoints->pUshrLong = nullptr;
 
   // Intrinsics
   qpoints->pIndexOf = art_quick_indexof;
@@ -154,11 +154,16 @@
   qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
   qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
   qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
-  qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
-  qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
-  qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
-  qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
-  qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+  qpoints->pInvokeDirectTrampolineWithAccessCheck =
+      art_quick_invoke_direct_trampoline_with_access_check;
+  qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+      art_quick_invoke_interface_trampoline_with_access_check;
+  qpoints->pInvokeStaticTrampolineWithAccessCheck =
+      art_quick_invoke_static_trampoline_with_access_check;
+  qpoints->pInvokeSuperTrampolineWithAccessCheck =
+      art_quick_invoke_super_trampoline_with_access_check;
+  qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+      art_quick_invoke_virtual_trampoline_with_access_check;
 
   // Thread
   qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 3d502e6..bf18dd5 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -503,7 +503,7 @@
      * The helper will attempt to locate the target and return a 128-bit result in $v0/$v1 consisting
      * of the target Method* in $v0 and method->code_ in $v1.
      *
-     * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+     * If unsuccessful, the helper will return null/null. There will be a pending exception in the
      * thread and we branch to another stub to deliver it.
      *
      * On success this wrapper will restore arguments and *jump* to the target, leaving the ra
@@ -656,7 +656,7 @@
     # call method (a0 and a1 have been untouched)
     lwu    $a1, 0($a1)           # make a1 = this ptr
     sw     $a1, 4($sp)           # copy this ptr (skip 4 bytes for method*)
-    sw     $zero, 0($sp)         # store NULL for method* at bottom of frame
+    sw     $zero, 0($sp)         # store null for method* at bottom of frame
     ld     $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64($a0)  # get pointer to the code
     jalr   $t9                   # call the method
     nop
@@ -758,7 +758,7 @@
 
 call_sfn:
     # call method (a0 has been untouched)
-    sw     $zero, 0($sp)         # store NULL for method* at bottom of frame
+    sw     $zero, 0($sp)         # store null for method* at bottom of frame
     ld     $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64($a0)  # get pointer to the code
     jalr   $t9                   # call the method
     nop
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 9cccf7c..0d9a888 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -117,7 +117,7 @@
         "add sp, sp, #20\n\t"
 
         "blx r3\n\t"                // Call the stub
-        "add sp, sp, #12\n\t"       // Pop nullptr and padding
+        "add sp, sp, #12\n\t"       // Pop null and padding
         ".cfi_adjust_cfa_offset -12\n\t"
         "pop {r1-r12, lr}\n\t"      // Restore state
         ".cfi_adjust_cfa_offset -52\n\t"
@@ -269,7 +269,7 @@
         "pushq (%%rsp)\n\t"             // & 16B alignment padding
         ".cfi_adjust_cfa_offset 16\n\t"
         "call *%%rax\n\t"              // Call the stub
-        "addq $16, %%rsp\n\t"          // Pop nullptr and padding
+        "addq $16, %%rsp\n\t"          // Pop null and padding
         ".cfi_adjust_cfa_offset -16\n\t"
         : "=a" (result)
           // Use the result from rax
@@ -344,7 +344,7 @@
         "add sp, sp, #24\n\t"
 
         "blx r3\n\t"                // Call the stub
-        "add sp, sp, #12\n\t"       // Pop nullptr and padding
+        "add sp, sp, #12\n\t"       // Pop null and padding
         ".cfi_adjust_cfa_offset -12\n\t"
         "pop {r1-r12, lr}\n\t"      // Restore state
         ".cfi_adjust_cfa_offset -52\n\t"
@@ -495,7 +495,7 @@
         "pushq (%%rsp)\n\t"            // & 16B alignment padding
         ".cfi_adjust_cfa_offset 16\n\t"
         "call *%%rbx\n\t"              // Call the stub
-        "addq $16, %%rsp\n\t"          // Pop nullptr and padding
+        "addq $16, %%rsp\n\t"          // Pop null and padding
         ".cfi_adjust_cfa_offset -16\n\t"
         : "=a" (result)
         // Use the result from rax
@@ -1032,7 +1032,7 @@
   }
 
   {
-    // We can use nullptr in the second argument as we do not need a method here (not used in
+    // We can use null in the second argument as we do not need a method here (not used in
     // resolved/initialized cases)
     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
                             StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
@@ -1046,7 +1046,7 @@
   }
 
   {
-    // We can use nullptr in the second argument as we do not need a method here (not used in
+    // We can use null in the second argument as we do not need a method here (not used in
     // resolved/initialized cases)
     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
                             StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
@@ -1166,7 +1166,7 @@
   }
 
   {
-    // We can use nullptr in the second argument as we do not need a method here (not used in
+    // We can use null in the second argument as we do not need a method here (not used in
     // resolved/initialized cases)
     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 10U,
                             reinterpret_cast<size_t>(nullptr),
@@ -1788,9 +1788,9 @@
 
   JNIEnv* env = Thread::Current()->GetJniEnv();
   jclass jc = env->FindClass("AllFields");
-  CHECK(jc != NULL);
+  CHECK(jc != nullptr);
   jobject o = env->AllocObject(jc);
-  CHECK(o != NULL);
+  CHECK(o != nullptr);
 
   ScopedObjectAccess soa(self);
   StackHandleScope<4> hs(self);
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index ace4670..a783d48 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -92,7 +92,7 @@
     XMM7_0, XMM7_1,
     kNumberOfFloatRegisters};
 
-  // Pointers to register locations. Values are initialized to NULL or the special registers below.
+  // Pointers to register locations. Values are initialized to null or the special registers below.
   uintptr_t* gprs_[kNumberOfCpuRegisters];
   uint32_t* fprs_[kNumberOfFloatRegisters];
   // Hold values for esp and eip if they are not located within a stack frame. EIP is somewhat
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index c012173..a371632 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -96,17 +96,6 @@
   qpoints->pUnlockObject = art_quick_unlock_object;
 
   // Math
-  // points->pCmpgDouble = NULL;  // Not needed on x86.
-  // points->pCmpgFloat = NULL;  // Not needed on x86.
-  // points->pCmplDouble = NULL;  // Not needed on x86.
-  // points->pCmplFloat = NULL;  // Not needed on x86.
-  // qpoints->pFmod = NULL;  // Not needed on x86.
-  // qpoints->pL2d = NULL;  // Not needed on x86.
-  // qpoints->pFmodf = NULL;  // Not needed on x86.
-  // qpoints->pL2f = NULL;  // Not needed on x86.
-  // points->pD2iz = NULL;  // Not needed on x86.
-  // points->pF2iz = NULL;  // Not needed on x86.
-  // qpoints->pIdivmod = NULL;  // Not needed on x86.
   qpoints->pD2l = art_quick_d2l;
   qpoints->pF2l = art_quick_f2l;
   qpoints->pLdiv = art_quick_ldiv;
@@ -125,11 +114,16 @@
   qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
   qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
   qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
-  qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
-  qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
-  qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
-  qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
-  qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+  qpoints->pInvokeDirectTrampolineWithAccessCheck =
+      art_quick_invoke_direct_trampoline_with_access_check;
+  qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+      art_quick_invoke_interface_trampoline_with_access_check;
+  qpoints->pInvokeStaticTrampolineWithAccessCheck =
+      art_quick_invoke_static_trampoline_with_access_check;
+  qpoints->pInvokeSuperTrampolineWithAccessCheck =
+      art_quick_invoke_super_trampoline_with_access_check;
+  qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+      art_quick_invoke_virtual_trampoline_with_access_check;
 
   // Thread
   qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 8712506..2de69aa 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -240,7 +240,7 @@
   // this code the same for both 32 and 64 bit.
 
   Thread* self = Thread::Current();
-  CHECK(self != nullptr);       // This will cause a SIGABRT if self is nullptr.
+  CHECK(self != nullptr);  // This will cause a SIGABRT if self is null.
 
   struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
   uc->CTX_JMP_BUF = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index c5a020a..c5d8b8f 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -285,7 +285,7 @@
      * The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
      * of the target Method* in r0 and method->code_ in r1.
      *
-     * If unsuccessful, the helper will return NULL/NULL. There will bea pending exception in the
+     * If unsuccessful, the helper will return null/null. There will be a pending exception in the
      * thread and we branch to another stub to deliver it.
      *
      * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -408,7 +408,7 @@
      * On entry:
      *   [sp] = return address
      *   [sp + 4] = method pointer
-     *   [sp + 8] = argument array or NULL for no argument methods
+     *   [sp + 8] = argument array or null for no argument methods
      *   [sp + 12] = size of argument array in bytes
      *   [sp + 16] = (managed) thread pointer
      *   [sp + 20] = JValue* result
@@ -442,7 +442,7 @@
     subl LITERAL(20), %ebx        // remove space for return address, ebx, ebp, esi and edi
     subl %ebx, %esp               // reserve stack space for argument array
 
-    movl LITERAL(0), (%esp)       // store NULL for method*
+    movl LITERAL(0), (%esp)       // store null for method*
 
     // Copy arg array into stack.
     movl 28(%ebp), %ecx           // ECX = size of args
@@ -506,7 +506,7 @@
      * On entry:
      *   [sp] = return address
      *   [sp + 4] = method pointer
-     *   [sp + 8] = argument array or NULL for no argument methods
+     *   [sp + 8] = argument array or null for no argument methods
      *   [sp + 12] = size of argument array in bytes
      *   [sp + 16] = (managed) thread pointer
      *   [sp + 20] = JValue* result
@@ -539,7 +539,7 @@
     subl LITERAL(20), %ebx        // remove space for return address, ebx, ebp, esi and edi
     subl %ebx, %esp               // reserve stack space for argument array
 
-    movl LITERAL(0), (%esp)       // store NULL for method*
+    movl LITERAL(0), (%esp)       // store null for method*
 
     // Copy arg array into stack.
     movl 28(%ebp), %ecx           // ECX = size of args
@@ -1352,7 +1352,7 @@
     call SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP)
     movl %eax, %edi               // remember code pointer in EDI
     addl LITERAL(16), %esp        // pop arguments
-    test %eax, %eax               // if code pointer is NULL goto deliver pending exception
+    test %eax, %eax               // if code pointer is null goto deliver pending exception
     jz 1f
     RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME_AND_JUMP
 1:
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index d03aa45..c9b0ff6 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -79,7 +79,7 @@
   NO_RETURN void DoLongJump() OVERRIDE;
 
  private:
-  // Pointers to register locations. Values are initialized to NULL or the special registers below.
+  // Pointers to register locations. Values are initialized to null or the special registers below.
   uintptr_t* gprs_[kNumberOfCpuRegisters];
   uint64_t* fprs_[kNumberOfFloatRegisters];
   // Hold values for rsp and rip if they are not located within a stack frame. RIP is somewhat
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 3bc0dc4..0cddec4 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -101,17 +101,6 @@
   qpoints->pUnlockObject = art_quick_unlock_object;
 
   // Math
-  // points->pCmpgDouble = NULL;  // Not needed on x86.
-  // points->pCmpgFloat = NULL;  // Not needed on x86.
-  // points->pCmplDouble = NULL;  // Not needed on x86.
-  // points->pCmplFloat = NULL;  // Not needed on x86.
-  // qpoints->pFmod = NULL;  // Not needed on x86.
-  // qpoints->pL2d = NULL;  // Not needed on x86.
-  // qpoints->pFmodf = NULL;  // Not needed on x86.
-  // qpoints->pL2f = NULL;  // Not needed on x86.
-  // points->pD2iz = NULL;  // Not needed on x86.
-  // points->pF2iz = NULL;  // Not needed on x86.
-  // qpoints->pIdivmod = NULL;  // Not needed on x86.
   qpoints->pD2l = art_d2l;
   qpoints->pF2l = art_f2l;
   qpoints->pLdiv = art_quick_ldiv;
@@ -122,7 +111,6 @@
   qpoints->pUshrLong = art_quick_lushr;
 
   // Intrinsics
-  // qpoints->pIndexOf = NULL;  // Not needed on x86.
   qpoints->pStringCompareTo = art_quick_string_compareto;
   qpoints->pMemcpy = art_quick_memcpy;
 
@@ -130,11 +118,16 @@
   qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
   qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
   qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
-  qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
-  qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
-  qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
-  qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
-  qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+  qpoints->pInvokeDirectTrampolineWithAccessCheck =
+      art_quick_invoke_direct_trampoline_with_access_check;
+  qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+      art_quick_invoke_interface_trampoline_with_access_check;
+  qpoints->pInvokeStaticTrampolineWithAccessCheck =
+      art_quick_invoke_static_trampoline_with_access_check;
+  qpoints->pInvokeSuperTrampolineWithAccessCheck =
+      art_quick_invoke_super_trampoline_with_access_check;
+  qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+      art_quick_invoke_virtual_trampoline_with_access_check;
 
   // Thread
   qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index ce21f01..8185deb 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -348,7 +348,7 @@
      * The helper will attempt to locate the target and return a 128-bit result in rax/rdx consisting
      * of the target Method* in rax and method->code_ in rdx.
      *
-     * If unsuccessful, the helper will return NULL/????. There will be a pending exception in the
+     * If unsuccessful, the helper will return null/????. There will be a pending exception in the
      * thread and we branch to another stub to deliver it.
      *
      * On success this wrapper will restore arguments and *jump* to the target, leaving the return
@@ -506,7 +506,7 @@
 #if (STACK_REFERENCE_SIZE != 4)
 #error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
 #endif
-    movl LITERAL(0), (%rsp)       // Store NULL for method*
+    movl LITERAL(0), (%rsp)       // Store null for method*
 
     movl %r10d, %ecx              // Place size of args in rcx.
     movq %rdi, %rax               // rax := method to be called
@@ -554,7 +554,7 @@
      * On entry:
      *   [sp] = return address
      *   rdi = method pointer
-     *   rsi = argument array or NULL if no arguments.
+     *   rsi = argument array or null if no arguments.
      *   rdx = size of argument array in bytes
      *   rcx = (managed) thread pointer
      *   r8 = JValue* result
@@ -600,7 +600,7 @@
 #if (STACK_REFERENCE_SIZE != 4)
 #error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
 #endif
-    movl LITERAL(0), (%rsp)        // Store NULL for method*
+    movl LITERAL(0), (%rsp)        // Store null for method*
 
     movl %r10d, %ecx               // Place size of args in rcx.
     movq %rdi, %rax                // rax := method to be called
@@ -1302,7 +1302,7 @@
     movq %rax, %r10               // Remember returned code pointer in R10.
     movq (%rsp), %rdi             // Load called method into RDI.
     RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
-    testq %r10, %r10              // If code pointer is NULL goto deliver pending exception.
+    testq %r10, %r10              // If code pointer is null goto deliver pending exception.
     jz 1f
     jmp *%r10                     // Tail call into method.
 1:
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index a2625e2..4991ad7 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -80,7 +80,7 @@
 }
 
 inline uint64_t ArtField::Get64(mirror::Object* object) {
-  DCHECK(object != NULL) << PrettyField(this);
+  DCHECK(object != nullptr) << PrettyField(this);
   DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
   if (UNLIKELY(IsVolatile())) {
     return object->GetField64Volatile(GetOffset());
@@ -90,7 +90,7 @@
 
 template<bool kTransactionActive>
 inline void ArtField::Set64(mirror::Object* object, uint64_t new_value) {
-  DCHECK(object != NULL) << PrettyField(this);
+  DCHECK(object != nullptr) << PrettyField(this);
   DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
   if (UNLIKELY(IsVolatile())) {
     object->SetField64Volatile<kTransactionActive>(GetOffset(), new_value);
@@ -100,7 +100,7 @@
 }
 
 inline mirror::Object* ArtField::GetObj(mirror::Object* object) {
-  DCHECK(object != NULL) << PrettyField(this);
+  DCHECK(object != nullptr) << PrettyField(this);
   DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
   if (UNLIKELY(IsVolatile())) {
     return object->GetFieldObjectVolatile<mirror::Object>(GetOffset());
@@ -110,7 +110,7 @@
 
 template<bool kTransactionActive>
 inline void ArtField::SetObj(mirror::Object* object, mirror::Object* new_value) {
-  DCHECK(object != NULL) << PrettyField(this);
+  DCHECK(object != nullptr) << PrettyField(this);
   DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
   if (UNLIKELY(IsVolatile())) {
     object->SetFieldObjectVolatile<kTransactionActive>(GetOffset(), new_value);
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 16c46f0..c0620bf 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -158,7 +158,7 @@
     return (GetAccessFlags() & kAccVolatile) != 0;
   }
 
-  // Returns an instance field with this offset in the given class or nullptr if not found.
+  // Returns an instance field with this offset in the given class or null if not found.
   static ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/runtime/base/hex_dump.cc b/runtime/base/hex_dump.cc
index 5423ff0..bce6b53 100644
--- a/runtime/base/hex_dump.cc
+++ b/runtime/base/hex_dump.cc
@@ -27,7 +27,7 @@
     return;
   }
 
-  if (address_ == NULL) {
+  if (address_ == nullptr) {
     os << "00000000:";
     return;
   }
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 0764b87..0ae7863 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -91,7 +91,7 @@
     gProgramInvocationShortName.reset(new std::string((last_slash != nullptr) ? last_slash + 1
                                                                            : argv[0]));
   } else {
-    // TODO: fall back to /proc/self/cmdline when argv is NULL on Linux.
+    // TODO: fall back to /proc/self/cmdline when argv is null on Linux.
     gCmdLine.reset(new std::string("<unset>"));
   }
   const char* tags = getenv("ANDROID_LOG_TAGS");
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 014f4ab..8b34374 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -72,7 +72,7 @@
 // This can be used to reveal or conceal logs with specific tags.
 extern void InitLogging(char* argv[]);
 
-// Returns the command line used to invoke the current tool or nullptr if InitLogging hasn't been
+// Returns the command line used to invoke the current tool or null if InitLogging hasn't been
 // performed.
 extern const char* GetCmdLine();
 
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index cb69817..a727992 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -39,13 +39,14 @@
 namespace art {
 
 #if ART_USE_FUTEXES
-static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout, volatile int *uaddr2, int val3) {
+static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout,
+                        volatile int *uaddr2, int val3) {
   return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
 }
 #endif  // ART_USE_FUTEXES
 
 static inline uint64_t SafeGetTid(const Thread* self) {
-  if (self != NULL) {
+  if (self != nullptr) {
     return static_cast<uint64_t>(self->GetTid());
   } else {
     return static_cast<uint64_t>(GetTid());
@@ -77,7 +78,7 @@
 }
 
 inline void BaseMutex::RegisterAsLocked(Thread* self) {
-  if (UNLIKELY(self == NULL)) {
+  if (UNLIKELY(self == nullptr)) {
     CheckUnattachedThread(level_);
     return;
   }
@@ -86,7 +87,7 @@
     bool bad_mutexes_held = false;
     for (int i = level_; i >= 0; --i) {
       BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
-      if (UNLIKELY(held_mutex != NULL)) {
+      if (UNLIKELY(held_mutex != nullptr)) {
         LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
                    << "(level " << LockLevel(i) << " - " << i
                    << ") while locking \"" << name_ << "\" "
@@ -109,7 +110,7 @@
 }
 
 inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
-  if (UNLIKELY(self == NULL)) {
+  if (UNLIKELY(self == nullptr)) {
     CheckUnattachedThread(level_);
     return;
   }
@@ -117,12 +118,12 @@
     if (kDebugLocking && gAborting == 0) {  // Avoid recursive aborts.
       CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
     }
-    self->SetHeldMutex(level_, NULL);
+    self->SetHeldMutex(level_, nullptr);
   }
 }
 
 inline void ReaderWriterMutex::SharedLock(Thread* self) {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
 #if ART_USE_FUTEXES
   bool done = false;
   do {
@@ -143,7 +144,7 @@
 }
 
 inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
   DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
   AssertSharedHeld(self);
   RegisterAsUnlocked(self);
@@ -161,7 +162,7 @@
         if (num_pending_writers_.LoadRelaxed() > 0 ||
             num_pending_readers_.LoadRelaxed() > 0) {
           // Wake any exclusive waiters as there are now no readers.
-          futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
+          futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
         }
       }
     } else {
@@ -174,11 +175,11 @@
 }
 
 inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
   bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
   if (kDebugLocking) {
     // Sanity debug check that if we think it is locked we have it in our held mutexes.
-    if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
+    if (result && self != nullptr && level_ != kMonitorLock && !gAborting) {
       CHECK_EQ(self->GetHeldMutex(level_), this);
     }
   }
@@ -190,11 +191,11 @@
 }
 
 inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
   bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
   if (kDebugLocking) {
     // Sanity that if the pthread thinks we own the lock the Thread agrees.
-    if (self != NULL && result)  {
+    if (self != nullptr && result) {
       CHECK_EQ(self->GetHeldMutex(level_), this);
     }
   }
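The RegisterAsLocked()/RegisterAsUnlocked() pair above enforces ART's lock hierarchy: a thread may only acquire a mutex whose level is strictly below every mutex it already holds (locks are taken top-down), which rules out deadlock cycles by construction. The shape of the check, reduced to a self-contained sketch with made-up level names:

```cpp
#include <cassert>
#include <cstdio>

enum LockLevel { kLoggingLock = 0, kThreadListLock = 1, kMutatorLock = 2, kMaxLockLevel };

// One slot per level, as Thread::GetHeldMutex()/SetHeldMutex() keep it.
struct FakeThread {
  const char* held[kMaxLockLevel] = {nullptr, nullptr, nullptr};
};

// Sketch of BaseMutex::RegisterAsLocked(): holding anything at or below
// the new lock's level is an ordering violation.
void RegisterAsLocked(FakeThread* self, LockLevel level, const char* name) {
  for (int i = level; i >= 0; --i) {
    if (self->held[i] != nullptr) {
      std::fprintf(stderr,
                   "Lock level violation: holding \"%s\" (level %d) "
                   "while locking \"%s\" (level %d)\n",
                   self->held[i], i, name, level);
      assert(false);
    }
  }
  self->held[level] = name;
}

int main() {
  FakeThread t;
  RegisterAsLocked(&t, kMutatorLock, "mutator lock");         // OK: nothing held.
  RegisterAsLocked(&t, kLoggingLock, "logging lock");         // OK: 0 < 2, top-down.
  RegisterAsLocked(&t, kThreadListLock, "thread list lock");  // Violation: level 0 <= 1 held.
}
```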
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 13dcb8c..99c7246 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -67,7 +67,7 @@
   Atomic<const BaseMutex*> all_mutexes_guard;
   // All created mutexes guarded by all_mutexes_guard_.
   std::set<BaseMutex*>* all_mutexes;
-  AllMutexData() : all_mutexes(NULL) {}
+  AllMutexData() : all_mutexes(nullptr) {}
 };
 static struct AllMutexData gAllMutexData[kAllMutexDataSize];
 
@@ -114,7 +114,7 @@
 class ScopedContentionRecorder FINAL : public ValueObject {
  public:
   ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
-      : mutex_(kLogLockContentions ? mutex : NULL),
+      : mutex_(kLogLockContentions ? mutex : nullptr),
         blocked_tid_(kLogLockContentions ? blocked_tid : 0),
         owner_tid_(kLogLockContentions ? owner_tid : 0),
         start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
@@ -144,7 +144,7 @@
   if (kLogLockContentions) {
     ScopedAllMutexesLock mu(this);
     std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
-    if (*all_mutexes_ptr == NULL) {
+    if (*all_mutexes_ptr == nullptr) {
       // We leak the global set of all mutexes to avoid ordering issues in global variable
       // construction/destruction.
       *all_mutexes_ptr = new std::set<BaseMutex*>();
@@ -165,7 +165,7 @@
     os << "Mutex logging:\n";
     ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
     std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
-    if (all_mutexes == NULL) {
+    if (all_mutexes == nullptr) {
       // No mutexes have been created yet at startup.
       return;
     }
@@ -190,7 +190,7 @@
 }
 
 void BaseMutex::CheckSafeToWait(Thread* self) {
-  if (self == NULL) {
+  if (self == nullptr) {
     CheckUnattachedThread(level_);
     return;
   }
@@ -202,7 +202,7 @@
       if (i != level_) {
         BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
         // We expect waits to happen while holding the thread list suspend thread lock.
-        if (held_mutex != NULL) {
+        if (held_mutex != nullptr) {
           LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                      << "(level " << LockLevel(i) << ") while performing wait on "
                      << "\"" << name_ << "\" (level " << level_ << ")";
@@ -354,7 +354,7 @@
 }
 
 void Mutex::ExclusiveLock(Thread* self) {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
   if (kDebugLocking && !recursive_) {
     AssertNotHeld(self);
   }
@@ -370,7 +370,7 @@
         // Failed to acquire, hang up.
         ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
         num_contenders_++;
-        if (futex(state_.Address(), FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
+        if (futex(state_.Address(), FUTEX_WAIT, 1, nullptr, nullptr, 0) != 0) {
           // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
           // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
           if ((errno != EAGAIN) && (errno != EINTR)) {
@@ -397,7 +397,7 @@
 }
 
 bool Mutex::ExclusiveTryLock(Thread* self) {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
   if (kDebugLocking && !recursive_) {
     AssertNotHeld(self);
   }
@@ -474,7 +474,7 @@
         if (LIKELY(done)) {  // Spurious fail?
           // Wake a contender.
           if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
-            futex(state_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
+            futex(state_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
           }
         }
       } else {
@@ -537,14 +537,14 @@
     // TODO: should we just not log at all if shutting down? this could be the logging mutex!
     MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
     Runtime* runtime = Runtime::Current();
-    bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
+    bool shutting_down = runtime == nullptr || runtime->IsShuttingDownLocked();
     PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
   }
 #endif
 }
 
 void ReaderWriterMutex::ExclusiveLock(Thread* self) {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
   AssertNotExclusiveHeld(self);
 #if ART_USE_FUTEXES
   bool done = false;
@@ -557,7 +557,7 @@
       // Failed to acquire, hang up.
       ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
       ++num_pending_writers_;
-      if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
+      if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
         // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
         // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
         if ((errno != EAGAIN) && (errno != EINTR)) {
@@ -578,7 +578,7 @@
 }
 
 void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
   AssertExclusiveHeld(self);
   RegisterAsUnlocked(self);
   DCHECK_NE(exclusive_owner_, 0U);
@@ -598,7 +598,7 @@
         // Wake any waiters.
         if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
                      num_pending_writers_.LoadRelaxed() > 0)) {
-          futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
+          futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
         }
       }
     } else {
@@ -613,7 +613,7 @@
 
 #if HAVE_TIMED_RWLOCK
 bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
 #if ART_USE_FUTEXES
   bool done = false;
   timespec end_abs_ts;
@@ -633,7 +633,7 @@
       }
       ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
       ++num_pending_writers_;
-      if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
+      if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, nullptr, 0) != 0) {
         if (errno == ETIMEDOUT) {
           --num_pending_writers_;
           return false;  // Timed out.
@@ -671,7 +671,7 @@
   // Owner holds it exclusively, hang up.
   ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
   ++num_pending_readers_;
-  if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
+  if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
     if (errno != EAGAIN) {
       PLOG(FATAL) << "futex wait failed for " << name_;
     }
@@ -681,7 +681,7 @@
 #endif
 
 bool ReaderWriterMutex::SharedTryLock(Thread* self) {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
 #if ART_USE_FUTEXES
   bool done = false;
   do {
@@ -710,9 +710,9 @@
 }
 
 bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
   bool result;
-  if (UNLIKELY(self == NULL)) {  // Handle unattached threads.
+  if (UNLIKELY(self == nullptr)) {  // Handle unattached threads.
     result = IsExclusiveHeld(self);  // TODO: a better best effort here.
   } else {
     result = (self->GetHeldMutex(level_) == this);
@@ -770,14 +770,14 @@
     errno = rc;
     MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
     Runtime* runtime = Runtime::Current();
-    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
+    bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDownLocked();
     PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
   }
 #endif
 }
 
 void ConditionVariable::Broadcast(Thread* self) {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
   // TODO: enable below, there's a race in thread creation that causes false failures currently.
   // guard_.AssertExclusiveHeld(self);
   DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
@@ -805,14 +805,14 @@
 }
 
 void ConditionVariable::Signal(Thread* self) {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
   guard_.AssertExclusiveHeld(self);
 #if ART_USE_FUTEXES
   if (num_waiters_ > 0) {
     sequence_++;  // Indicate a signal occurred.
     // Futex wake 1 waiter, who will then come in and contend on the mutex. It'd be nice to
     // requeue them to avoid this; however, requeueing can only move all waiters.
-    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
+    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
     // Check that something was woken, or else we changed sequence_ before they had a chance to wait.
     CHECK((num_woken == 0) || (num_woken == 1));
   }
@@ -827,7 +827,7 @@
 }
 
 void ConditionVariable::WaitHoldingLocks(Thread* self) {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
   guard_.AssertExclusiveHeld(self);
   unsigned int old_recursion_count = guard_.recursion_count_;
 #if ART_USE_FUTEXES
@@ -837,7 +837,7 @@
   guard_.recursion_count_ = 1;
   int32_t cur_sequence = sequence_.LoadRelaxed();
   guard_.ExclusiveUnlock(self);
-  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
+  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, nullptr, nullptr, 0) != 0) {
     // Futex failed, check it is an expected error.
     // EAGAIN == EWOULDBLOCK, so we let the caller try again.
     // EINTR implies a signal was sent to this thread.
@@ -862,7 +862,7 @@
 }
 
 bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
-  DCHECK(self == NULL || self == Thread::Current());
+  DCHECK(self == nullptr || self == Thread::Current());
   bool timed_out = false;
   guard_.AssertExclusiveHeld(self);
   guard_.CheckSafeToWait(self);
@@ -876,7 +876,7 @@
   guard_.recursion_count_ = 1;
   int32_t cur_sequence = sequence_.LoadRelaxed();
   guard_.ExclusiveUnlock(self);
-  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
+  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, nullptr, 0) != 0) {
     if (errno == ETIMEDOUT) {
       // Timed out, we're done.
       timed_out = true;
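
The condition-variable hunks above all orbit one protocol: snapshot sequence_, drop the guard mutex, then FUTEX_WAIT on sequence_ with the snapshot, so a Signal() that bumps sequence_ in the unlock window makes the wait return immediately instead of losing the wakeup. A minimal sketch, reusing the hypothetical futex()/TinyMutex helpers from the earlier sketch:

class TinyCondVar {
 public:
  void Wait(TinyMutex* guard) {
    int cur = sequence_.load();
    guard->Unlock();
    // Sleeps only while sequence_ still equals the snapshot. If Signal() bumped
    // sequence_ after Unlock(), the kernel returns EAGAIN at once, which is why
    // the wakeup cannot be lost.
    futex(&sequence_, FUTEX_WAIT, cur);
    guard->Lock();
  }
  void Signal() {
    sequence_.fetch_add(1);            // Publish "a signal occurred".
    futex(&sequence_, FUTEX_WAKE, 1);  // Wake at most one waiter; zero woken is
                                       // fine if the waiter saw the bump early.
  }

 private:
  std::atomic<int> sequence_{0};
};
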
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 6e4b96c..f2be85e 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -344,8 +344,8 @@
   // Assert the current thread has shared access to the ReaderWriterMutex.
   void AssertSharedHeld(const Thread* self) {
     if (kDebugLocking && (gAborting == 0)) {
-      // TODO: we can only assert this well when self != NULL.
-      CHECK(IsSharedHeld(self) || self == NULL) << *this;
+      // TODO: we can only assert this well when self != null.
+      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
     }
   }
   void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }
diff --git a/runtime/base/mutex_test.cc b/runtime/base/mutex_test.cc
index 289d3ef..3750c81 100644
--- a/runtime/base/mutex_test.cc
+++ b/runtime/base/mutex_test.cc
@@ -106,7 +106,7 @@
     state->mu.Lock(Thread::Current());
     state->cv.Signal(Thread::Current());
     state->mu.Unlock(Thread::Current());
-    return NULL;
+    return nullptr;
   }
 
   Mutex mu;
@@ -120,14 +120,15 @@
   state.mu.Lock(Thread::Current());
 
   pthread_t pthread;
-  int pthread_create_result = pthread_create(&pthread, NULL, RecursiveLockWait::Callback, &state);
+  int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWait::Callback,
+                                             &state);
   ASSERT_EQ(0, pthread_create_result);
 
   state.cv.Wait(Thread::Current());
 
   state.mu.Unlock(Thread::Current());
   state.mu.Unlock(Thread::Current());
-  EXPECT_EQ(pthread_join(pthread, NULL), 0);
+  EXPECT_EQ(pthread_join(pthread, nullptr), 0);
 }
 
 // This ensures we don't hang when waiting on a recursively locked mutex,
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index 0e93eee..71e0590 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -31,7 +31,7 @@
       UNUSED(file_->FlushCloseOrErase());  // Ignore result.
     }
     file_.reset(OS::OpenFileWithFlags(filename, O_CREAT | O_RDWR));
-    if (file_.get() == NULL) {
+    if (file_.get() == nullptr) {
       *error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
       return false;
     }
@@ -71,14 +71,15 @@
   }
   if (0 != TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_EX))) {
     file_.reset();
-    *error_msg = StringPrintf("Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
+    *error_msg = StringPrintf(
+        "Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
     return false;
   }
   return true;
 }
 
 File* ScopedFlock::GetFile() {
-  CHECK(file_.get() != NULL);
+  CHECK(file_.get() != nullptr);
   return file_.get();
 }
 
@@ -89,7 +90,7 @@
 ScopedFlock::ScopedFlock() { }
 
 ScopedFlock::~ScopedFlock() {
-  if (file_.get() != NULL) {
+  if (file_.get() != nullptr) {
     int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_UN));
     CHECK_EQ(0, flock_result);
     if (file_->FlushCloseOrErase() != 0) {
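
ScopedFlock pairs open/flock in Init with an unlock in the destructor; the hunks above only touch its null checks, but the underlying RAII shape is roughly the following sketch (FileLock is a hypothetical name; TEMP_FAILURE_RETRY and error reporting are omitted for brevity):

#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

class FileLock {
 public:
  explicit FileLock(const char* path) : fd_(open(path, O_CREAT | O_RDWR, 0644)) {
    if (fd_ != -1 && flock(fd_, LOCK_EX) != 0) {  // Block until the lock is ours.
      close(fd_);
      fd_ = -1;
    }
  }
  ~FileLock() {
    if (fd_ != -1) {
      flock(fd_, LOCK_UN);  // Release before closing, as ~ScopedFlock does.
      close(fd_);
    }
  }
  bool ok() const { return fd_ != -1; }

 private:
  int fd_;
};
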
diff --git a/runtime/base/stl_util.h b/runtime/base/stl_util.h
index 3c5565c..901f25f 100644
--- a/runtime/base/stl_util.h
+++ b/runtime/base/stl_util.h
@@ -54,28 +54,30 @@
 // hash_set, or any other STL container which defines sensible begin(), end(),
 // and clear() methods.
 //
-// If container is NULL, this function is a no-op.
+// If container is null, this function is a no-op.
 //
 // As an alternative to calling STLDeleteElements() directly, consider
 // using a container of std::unique_ptr, which ensures that your container's
 // elements are deleted when the container goes out of scope.
 template <class T>
 void STLDeleteElements(T *container) {
-  if (!container) return;
-  STLDeleteContainerPointers(container->begin(), container->end());
-  container->clear();
+  if (container != nullptr) {
+    STLDeleteContainerPointers(container->begin(), container->end());
+    container->clear();
+  }
 }
 
 // Given an STL container consisting of (key, value) pairs, STLDeleteValues
 // deletes all the "value" components and clears the container.  Does nothing
-// in the case it's given a NULL pointer.
+// in the case it's given a null pointer.
 template <class T>
 void STLDeleteValues(T *v) {
-  if (!v) return;
-  for (typename T::iterator i = v->begin(); i != v->end(); ++i) {
-    delete i->second;
+  if (v != nullptr) {
+    for (typename T::iterator i = v->begin(); i != v->end(); ++i) {
+      delete i->second;
+    }
+    v->clear();
   }
-  v->clear();
 }
 
 template <class T>
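
A usage sketch for the two helpers as rewritten above, plus the std::unique_ptr alternative the header comment recommends. Node and Example are hypothetical, and namespace qualifiers are omitted (the helpers live in namespace art):

#include <map>
#include <memory>
#include <vector>

#include "base/stl_util.h"  // The header shown above.

struct Node { int payload; };

void Example() {
  std::vector<Node*> owned = { new Node{1}, new Node{2} };
  STLDeleteElements(&owned);  // Deletes both Nodes, then clears the vector.
  STLDeleteElements<std::vector<Node*>>(nullptr);  // No-op on null, by contract.

  std::map<int, Node*> by_id;
  by_id[7] = new Node{7};
  STLDeleteValues(&by_id);  // Deletes the mapped Nodes, then clears the map.

  // The alternative the header comment recommends: ownership in the container.
  std::vector<std::unique_ptr<Node>> safer;
  safer.push_back(std::unique_ptr<Node>(new Node{3}));  // Freed on scope exit.
}
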
diff --git a/runtime/base/variant_map.h b/runtime/base/variant_map.h
index 8655a9e..1d7596a 100644
--- a/runtime/base/variant_map.h
+++ b/runtime/base/variant_map.h
@@ -31,7 +31,7 @@
 //
 // struct VariantMap {
 //   template <typename TValue>
-//   TValue* Get(Key<T> key);  // nullptr if the value was never set, otherwise the value.
+//   TValue* Get(Key<T> key);  // null if the value was never set, otherwise the value.
 //
 //   template <typename TValue>
 //   void Set(Key<T> key, TValue value);
diff --git a/runtime/base/variant_map_test.cc b/runtime/base/variant_map_test.cc
index f306a48..ccb22eb 100644
--- a/runtime/base/variant_map_test.cc
+++ b/runtime/base/variant_map_test.cc
@@ -18,7 +18,7 @@
 #include "gtest/gtest.h"
 
 #define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
-                                        reinterpret_cast<void*>(NULL));
+                                        static_cast<void*>(nullptr));
 
 namespace art {
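
The macro change above is more than style: std::nullptr_t is neither a pointer nor an integral type, so it is not a valid reinterpret_cast operand and reinterpret_cast<void*>(nullptr) does not compile, while static_cast works because nullptr_t converts implicitly to any pointer type. A sketch:

#include <cstddef>  // For NULL.

void* ok = static_cast<void*>(nullptr);    // OK: nullptr_t converts implicitly.
// void* bad = reinterpret_cast<void*>(nullptr);  // Ill-formed: nullptr_t is not
//                                                // a valid reinterpret_cast operand.
void* old_style = reinterpret_cast<void*>(NULL);  // Accepted only because NULL is
                                                  // an integral constant here.
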
 
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index c6940d3..30084d2 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -183,7 +183,7 @@
   }
 
   /*
-   * Verify that the pointer value is non-NULL.
+   * Verify that the pointer value is non-null.
    */
   bool CheckNonNull(const void* ptr) {
     if (UNLIKELY(ptr == nullptr)) {
@@ -612,7 +612,7 @@
   };
 
   /*
-   * Verify that "jobj" is a valid non-NULL object reference, and points to
+   * Verify that "jobj" is a valid non-null object reference, and points to
    * an instance of expectedClass.
    *
    * Because we're looking at an object on the GC heap, we have to switch
@@ -941,7 +941,7 @@
     }
   }
   /*
-   * Verify that "array" is non-NULL and points to an Array object.
+   * Verify that "array" is non-null and points to an Array object.
    *
    * Since we're dealing with objects, switch to "running" mode.
    */
@@ -1277,7 +1277,7 @@
    * Verify the guard area and, if "modOkay" is false, that the data itself
    * has not been altered.
    *
-   * The caller has already checked that "dataBuf" is non-NULL.
+   * The caller has already checked that "dataBuf" is non-null.
    */
   static bool Check(const char* function_name, const void* embedded_buf, bool mod_okay) {
     const GuardedCopy* copy = FromEmbedded(embedded_buf);
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 87d1c4c..1428749 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -60,7 +60,7 @@
                                                   mirror::ArtMethod* referrer) {
   mirror::Class* declaring_class = referrer->GetDeclaringClass();
   mirror::String* resolved_string = declaring_class->GetDexCacheStrings()->Get(string_idx);
-  if (UNLIKELY(resolved_string == NULL)) {
+  if (UNLIKELY(resolved_string == nullptr)) {
     StackHandleScope<1> hs(Thread::Current());
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
     const DexFile& dex_file = *dex_cache->GetDexFile();
@@ -92,7 +92,7 @@
   mirror::Class* declaring_class = referrer->GetDeclaringClass();
   mirror::DexCache* dex_cache_ptr = declaring_class->GetDexCache();
   mirror::Class* resolved_type = dex_cache_ptr->GetResolvedType(type_idx);
-  if (UNLIKELY(resolved_type == NULL)) {
+  if (UNLIKELY(resolved_type == nullptr)) {
     StackHandleScope<2> hs(Thread::Current());
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_cache_ptr));
     Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
@@ -146,7 +146,7 @@
                                                    bool is_static) {
   mirror::Class* declaring_class = referrer->GetDeclaringClass();
   ArtField* resolved_field = GetResolvedField(field_idx, declaring_class);
-  if (UNLIKELY(resolved_field == NULL)) {
+  if (UNLIKELY(resolved_field == nullptr)) {
     StackHandleScope<2> hs(Thread::Current());
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
     Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
@@ -196,7 +196,7 @@
   DCHECK(!class_roots_.IsNull());
   mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
   mirror::Class* klass = class_roots->Get(class_root);
-  DCHECK(klass != NULL);
+  DCHECK(klass != nullptr);
   return klass;
 }
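
The resolvers touched above share one memoization shape: probe the declaring class's dex cache, and take the slow path only on a miss, with the slow path itself responsible for populating the cache. A type-agnostic sketch under those assumptions (all names hypothetical):

#include <cstdint>

template <typename T, typename Cache, typename SlowPath>
T* ResolveCached(const Cache& cache, uint32_t idx, SlowPath slow_path) {
  T* resolved = cache.Get(idx);
  if (resolved == nullptr) {     // Cache miss: first use of this index here.
    resolved = slow_path(idx);   // May return nullptr with an exception pending.
    // No explicit store: the slow path records the result in the cache itself.
  }
  return resolved;
}
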
 
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 85b245f..c344eb4 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -110,7 +110,7 @@
     mirror::Throwable* pre_allocated = runtime->GetPreAllocatedNoClassDefFoundError();
     self->SetException(pre_allocated);
   } else {
-    if (c->GetVerifyErrorClass() != NULL) {
+    if (c->GetVerifyErrorClass() != nullptr) {
       // TODO: change the verifier to store an _instance_, with a useful detail message?
       std::string temp;
       self->ThrowNewException(c->GetVerifyErrorClass()->GetDescriptor(&temp),
@@ -2271,7 +2271,7 @@
 // the right context.  It does NOT become the class loader for the
 // array class; that always comes from the base element class.
 //
-// Returns nullptr with an exception raised on failure.
+// Returns null with an exception raised on failure.
 mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descriptor, size_t hash,
                                              Handle<mirror::ClassLoader> class_loader) {
   // Identify the underlying component type
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d7c625d..8e27413 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -152,7 +152,7 @@
                              const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Finds a class by its descriptor, returning NULL if it isn't wasn't loaded
+  // Finds a class by its descriptor, returning null if it wasn't loaded
   // by the given 'class_loader'.
   mirror::Class* LookupClass(Thread* self, const char* descriptor, size_t hash,
                              mirror::ClassLoader* class_loader)
@@ -432,7 +432,7 @@
   void SetEntryPointsToInterpreter(mirror::ArtMethod* method) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Attempts to insert a class into a class table.  Returns NULL if
+  // Attempts to insert a class into a class table.  Returns null if
   // the class was inserted, otherwise returns an existing class with
   // the same descriptor and ClassLoader.
   mirror::Class* InsertClass(const char* descriptor, mirror::Class* klass, size_t hash)
@@ -444,7 +444,7 @@
 
   mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
-    DCHECK(class_roots != NULL);
+    DCHECK(class_roots != nullptr);
     return class_roots;
   }
 
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index e17b885..7a711cc 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -81,7 +81,7 @@
 }
 
 ScratchFile::ScratchFile(File* file) {
-  CHECK(file != NULL);
+  CHECK(file != nullptr);
   filename_ = file->GetPath();
   file_.reset(file);
 }
@@ -559,7 +559,7 @@
   std::string location;
   if (IsHost()) {
     const char* host_dir = getenv("ANDROID_HOST_OUT");
-    CHECK(host_dir != NULL);
+    CHECK(host_dir != nullptr);
     location = StringPrintf("%s/framework/core.%s", host_dir, suffix);
   } else {
     location = StringPrintf("/data/art-test/core.%s", suffix);
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 407746f..0808999 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -35,7 +35,7 @@
 
 static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  if (referrer != NULL) {
+  if (referrer != nullptr) {
     std::string location(referrer->GetLocation());
     if (!location.empty()) {
       os << " (declaration of '" << PrettyDescriptor(referrer)
@@ -45,10 +45,10 @@
 }
 
 static void ThrowException(const char* exception_descriptor,
-                           mirror::Class* referrer, const char* fmt, va_list* args = NULL)
+                           mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   std::ostringstream msg;
-  if (args != NULL) {
+  if (args != nullptr) {
     std::string vmsg;
     StringAppendV(&vmsg, fmt, *args);
     msg << vmsg;
@@ -61,10 +61,10 @@
 }
 
 static void ThrowWrappedException(const char* exception_descriptor,
-                                  mirror::Class* referrer, const char* fmt, va_list* args = NULL)
+                                  mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   std::ostringstream msg;
-  if (args != NULL) {
+  if (args != nullptr) {
     std::string vmsg;
     StringAppendV(&vmsg, fmt, *args);
     msg << vmsg;
@@ -79,7 +79,7 @@
 // AbstractMethodError
 
 void ThrowAbstractMethodError(mirror::ArtMethod* method) {
-  ThrowException("Ljava/lang/AbstractMethodError;", NULL,
+  ThrowException("Ljava/lang/AbstractMethodError;", nullptr,
                  StringPrintf("abstract method \"%s\"",
                               PrettyMethod(method).c_str()).c_str());
 }
@@ -87,20 +87,20 @@
 // ArithmeticException
 
 void ThrowArithmeticExceptionDivideByZero() {
-  ThrowException("Ljava/lang/ArithmeticException;", NULL, "divide by zero");
+  ThrowException("Ljava/lang/ArithmeticException;", nullptr, "divide by zero");
 }
 
 // ArrayIndexOutOfBoundsException
 
 void ThrowArrayIndexOutOfBoundsException(int index, int length) {
-  ThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", NULL,
+  ThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", nullptr,
                  StringPrintf("length=%d; index=%d", length, index).c_str());
 }
 
 // ArrayStoreException
 
 void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class) {
-  ThrowException("Ljava/lang/ArrayStoreException;", NULL,
+  ThrowException("Ljava/lang/ArrayStoreException;", nullptr,
                  StringPrintf("%s cannot be stored in an array of type %s",
                               PrettyDescriptor(element_class).c_str(),
                               PrettyDescriptor(array_class).c_str()).c_str());
@@ -109,14 +109,14 @@
 // ClassCastException
 
 void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type) {
-  ThrowException("Ljava/lang/ClassCastException;", NULL,
+  ThrowException("Ljava/lang/ClassCastException;", nullptr,
                  StringPrintf("%s cannot be cast to %s",
                               PrettyDescriptor(src_type).c_str(),
                               PrettyDescriptor(dest_type).c_str()).c_str());
 }
 
 void ThrowClassCastException(const char* msg) {
-  ThrowException("Ljava/lang/ClassCastException;", NULL, msg);
+  ThrowException("Ljava/lang/ClassCastException;", nullptr, msg);
 }
 
 // ClassCircularityError
@@ -174,7 +174,7 @@
   msg << "Final field '" << PrettyField(accessed, false) << "' cannot be written to by method '"
       << PrettyMethod(referrer) << "'";
   ThrowException("Ljava/lang/IllegalAccessError;",
-                 referrer != NULL ? referrer->GetClass() : NULL,
+                 referrer != nullptr ? referrer->GetClass() : nullptr,
                  msg.str().c_str());
 }
 
@@ -188,13 +188,13 @@
 // IllegalAccessException
 
 void ThrowIllegalAccessException(const char* msg) {
-  ThrowException("Ljava/lang/IllegalAccessException;", NULL, msg);
+  ThrowException("Ljava/lang/IllegalAccessException;", nullptr, msg);
 }
 
 // IllegalArgumentException
 
 void ThrowIllegalArgumentException(const char* msg) {
-  ThrowException("Ljava/lang/IllegalArgumentException;", NULL, msg);
+  ThrowException("Ljava/lang/IllegalArgumentException;", nullptr, msg);
 }
 
 
@@ -207,7 +207,7 @@
   msg << "The method '" << PrettyMethod(method) << "' was expected to be of type "
       << expected_type << " but instead was found to be of type " << found_type;
   ThrowException("Ljava/lang/IncompatibleClassChangeError;",
-                 referrer != NULL ? referrer->GetClass() : NULL,
+                 referrer != nullptr ? referrer->GetClass() : nullptr,
                  msg.str().c_str());
 }
 
@@ -216,14 +216,14 @@
                                                                 mirror::ArtMethod* referrer) {
   // Referrer is calling interface_method on this_object, however, the interface_method isn't
   // implemented by this_object.
-  CHECK(this_object != NULL);
+  CHECK(this_object != nullptr);
   std::ostringstream msg;
   msg << "Class '" << PrettyDescriptor(this_object->GetClass())
       << "' does not implement interface '"
       << PrettyDescriptor(interface_method->GetDeclaringClass())
       << "' in call to '" << PrettyMethod(interface_method) << "'";
   ThrowException("Ljava/lang/IncompatibleClassChangeError;",
-                 referrer != NULL ? referrer->GetClass() : NULL,
+                 referrer != nullptr ? referrer->GetClass() : nullptr,
                  msg.str().c_str());
 }
 
@@ -249,14 +249,14 @@
 void ThrowIOException(const char* fmt, ...) {
   va_list args;
   va_start(args, fmt);
-  ThrowException("Ljava/io/IOException;", NULL, fmt, &args);
+  ThrowException("Ljava/io/IOException;", nullptr, fmt, &args);
   va_end(args);
 }
 
 void ThrowWrappedIOException(const char* fmt, ...) {
   va_list args;
   va_start(args, fmt);
-  ThrowWrappedException("Ljava/io/IOException;", NULL, fmt, &args);
+  ThrowWrappedException("Ljava/io/IOException;", nullptr, fmt, &args);
   va_end(args);
 }
 
@@ -272,12 +272,12 @@
 // NegativeArraySizeException
 
 void ThrowNegativeArraySizeException(int size) {
-  ThrowException("Ljava/lang/NegativeArraySizeException;", NULL,
+  ThrowException("Ljava/lang/NegativeArraySizeException;", nullptr,
                  StringPrintf("%d", size).c_str());
 }
 
 void ThrowNegativeArraySizeException(const char* msg) {
-  ThrowException("Ljava/lang/NegativeArraySizeException;", NULL, msg);
+  ThrowException("Ljava/lang/NegativeArraySizeException;", nullptr, msg);
 }
 
 // NoSuchFieldError
@@ -319,7 +319,7 @@
   std::ostringstream msg;
   msg << "Attempt to " << (is_read ? "read from" : "write to")
       << " field '" << PrettyField(field, true) << "' on a null object reference";
-  ThrowException("Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
+  ThrowException("Ljava/lang/NullPointerException;", nullptr, msg.str().c_str());
 }
 
 static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx,
@@ -329,7 +329,7 @@
   std::ostringstream msg;
   msg << "Attempt to invoke " << type << " method '"
       << PrettyMethod(method_idx, dex_file, true) << "' on a null object reference";
-  ThrowException("Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
+  ThrowException("Ljava/lang/NullPointerException;", nullptr, msg.str().c_str());
 }
 
 void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
@@ -379,7 +379,7 @@
       // method is invoked at this location.
       mirror::ArtMethod* invoked_method =
           verifier::MethodVerifier::FindInvokedMethodAtDexPc(method, throw_dex_pc);
-      if (invoked_method != NULL) {
+      if (invoked_method != nullptr) {
         // NPE with precise message.
         ThrowNullPointerExceptionForMethodAccess(invoked_method, kVirtual);
       } else {
@@ -411,7 +411,7 @@
       // field is accessed at this location.
       ArtField* field =
           verifier::MethodVerifier::FindAccessedFieldAtDexPc(method, throw_dex_pc);
-      if (field != NULL) {
+      if (field != nullptr) {
         // NPE with precise message.
         ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
       } else {
@@ -443,7 +443,7 @@
       // field is accessed at this location.
       ArtField* field =
           verifier::MethodVerifier::FindAccessedFieldAtDexPc(method, throw_dex_pc);
-      if (field != NULL) {
+      if (field != nullptr) {
         // NPE with precise message.
         ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
       } else {
@@ -459,7 +459,7 @@
     case Instruction::AGET_BYTE:
     case Instruction::AGET_CHAR:
     case Instruction::AGET_SHORT:
-      ThrowException("Ljava/lang/NullPointerException;", NULL,
+      ThrowException("Ljava/lang/NullPointerException;", nullptr,
                      "Attempt to read from null array");
       break;
     case Instruction::APUT:
@@ -469,11 +469,11 @@
     case Instruction::APUT_BYTE:
     case Instruction::APUT_CHAR:
     case Instruction::APUT_SHORT:
-      ThrowException("Ljava/lang/NullPointerException;", NULL,
+      ThrowException("Ljava/lang/NullPointerException;", nullptr,
                      "Attempt to write to null array");
       break;
     case Instruction::ARRAY_LENGTH:
-      ThrowException("Ljava/lang/NullPointerException;", NULL,
+      ThrowException("Ljava/lang/NullPointerException;", nullptr,
                      "Attempt to get length of null array");
       break;
     default: {
@@ -481,7 +481,7 @@
       //       message/logging is so we can improve any cases we've missed in the future.
       const DexFile* dex_file =
           method->GetDeclaringClass()->GetDexCache()->GetDexFile();
-      ThrowException("Ljava/lang/NullPointerException;", NULL,
+      ThrowException("Ljava/lang/NullPointerException;", nullptr,
                      StringPrintf("Null pointer exception during instruction '%s'",
                                   instr->DumpString(dex_file).c_str()).c_str());
       break;
@@ -490,7 +490,7 @@
 }
 
 void ThrowNullPointerException(const char* msg) {
-  ThrowException("Ljava/lang/NullPointerException;", NULL, msg);
+  ThrowException("Ljava/lang/NullPointerException;", nullptr, msg);
 }
 
 // RuntimeException
@@ -498,7 +498,7 @@
 void ThrowRuntimeException(const char* fmt, ...) {
   va_list args;
   va_start(args, fmt);
-  ThrowException("Ljava/lang/RuntimeException;", NULL, fmt, &args);
+  ThrowException("Ljava/lang/RuntimeException;", nullptr, fmt, &args);
   va_end(args);
 }
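
All of the throw helpers above funnel into the two static ThrowException/ThrowWrappedException functions through an optional va_list*, which is why the defaulted argument flips from NULL to nullptr. The forwarding shape, as a standalone sketch (Fail/FailImpl are hypothetical stand-ins):

#include <cstdarg>
#include <cstdio>
#include <string>

// Shared back end: formats only when a va_list was supplied.
static void FailImpl(const char* what, const char* fmt, va_list* args = nullptr) {
  std::string msg(what);
  if (args != nullptr) {
    char buf[256];
    std::vsnprintf(buf, sizeof(buf), fmt, *args);  // Consume the caller's va_list once.
    msg += ": ";
    msg += buf;
  }
  std::fprintf(stderr, "%s\n", msg.c_str());
}

// Public front end: captures ... and forwards a pointer, like ThrowIOException.
void Fail(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  FailImpl("failure", fmt, &args);
  va_end(args);
}
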
 
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index c074b54..f3ce552 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -174,7 +174,8 @@
   jobject type_;  // This is a weak global.
   size_t byte_count_;
   uint16_t thin_lock_id_;
-  AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];  // Unused entries have nullptr method.
+  // Unused entries have null method.
+  AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];
 };
 
 class Breakpoint {
@@ -714,7 +715,7 @@
   mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id, &error);
   if (o == nullptr) {
     if (error == JDWP::ERR_NONE) {
-      return "NULL";
+      return "null";
     } else {
       return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
     }
@@ -727,7 +728,7 @@
 
 std::string Dbg::GetClassName(mirror::Class* klass) {
   if (klass == nullptr) {
-    return "NULL";
+    return "null";
   }
   std::string temp;
   return DescriptorToName(klass->GetDescriptor(&temp));
@@ -1409,7 +1410,7 @@
 std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
   mirror::ArtMethod* m = FromMethodId(method_id);
   if (m == nullptr) {
-    return "NULL";
+    return "null";
   }
   return m->GetName();
 }
@@ -1417,7 +1418,7 @@
 std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
   ArtField* f = FromFieldId(field_id);
   if (f == nullptr) {
-    return "NULL";
+    return "null";
   }
   return f->GetName();
 }
@@ -1721,7 +1722,7 @@
   if (receiver_class == nullptr && o != nullptr) {
     receiver_class = o->GetClass();
   }
-  // TODO: should we give up now if receiver_class is nullptr?
+  // TODO: should we give up now if receiver_class is null?
   if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
     LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
     return JDWP::ERR_INVALID_FIELDID;
@@ -2176,7 +2177,7 @@
     }
     mirror::Object* peer = t->GetPeer();
     if (peer == nullptr) {
-      // peer might be NULL if the thread is still starting up. We can't tell the debugger about
+      // peer might be null if the thread is still starting up. We can't tell the debugger about
       // this thread yet.
       // TODO: if we identified threads to the debugger by their Thread*
       // rather than their peer's mirror::Object*, we could fix this.
@@ -3390,7 +3391,7 @@
 }
 
 bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m) {
-  // The upcall can be nullptr and in that case we don't need to do anything.
+  // The upcall can be null and in that case we don't need to do anything.
   if (m == nullptr) {
     return false;
   }
@@ -3427,7 +3428,7 @@
 }
 
 bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, mirror::ArtMethod* m) {
-  // The upcall can be nullptr and in that case we don't need to do anything.
+  // The upcall can be null and in that case we don't need to do anything.
   if (m == nullptr) {
     return false;
   }
diff --git a/runtime/debugger.h b/runtime/debugger.h
index c287121..fe90eb6 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -68,7 +68,7 @@
   GcRoot<mirror::Class> klass;
   GcRoot<mirror::ArtMethod> method;
   const uint32_t arg_count;
-  uint64_t* const arg_values;   // will be NULL if arg_count_ == 0
+  uint64_t* const arg_values;   // will be null if arg_count_ == 0
   const uint32_t options;
 
   /* result */
diff --git a/runtime/dex_file-inl.h b/runtime/dex_file-inl.h
index c68fdca..760006a 100644
--- a/runtime/dex_file-inl.h
+++ b/runtime/dex_file-inl.h
@@ -32,7 +32,7 @@
 
 inline const char* DexFile::GetStringDataAndUtf16Length(const StringId& string_id,
                                                         uint32_t* utf16_length) const {
-  DCHECK(utf16_length != NULL) << GetLocation();
+  DCHECK(utf16_length != nullptr) << GetLocation();
   const uint8_t* ptr = begin_ + string_id.string_data_off_;
   *utf16_length = DecodeUnsignedLeb128(&ptr);
   return reinterpret_cast<const char*>(ptr);
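
The accessor above leans on a ULEB128 decode: a dex string_data_item is a ULEB128-encoded UTF-16 length followed by the MUTF-8 bytes, so decoding the length leaves the cursor at the character data. A sketch of the decoder this relies on (DecodeUleb128 is an illustrative stand-in for art's DecodeUnsignedLeb128):

#include <cstdint>

// Little-endian base-128: 7 payload bits per byte, high bit set on all but
// the last byte.
static uint32_t DecodeUleb128(const uint8_t** data) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*data)++;  // Consume one byte and advance the caller's cursor.
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);
  return result;
}
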
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 03a47a3..0589cdd 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -57,7 +57,7 @@
 const uint8_t DexFile::kDexMagicVersion[] = { '0', '3', '5', '\0' };
 
 static int OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_msg) {
-  CHECK(magic != NULL);
+  CHECK(magic != nullptr);
   ScopedFd fd(open(filename, O_RDONLY, 0));
   if (fd.get() == -1) {
     *error_msg = StringPrintf("Unable to open '%s' : %s", filename, strerror(errno));
@@ -77,7 +77,7 @@
 }
 
 bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string* error_msg) {
-  CHECK(checksum != NULL);
+  CHECK(checksum != nullptr);
   uint32_t magic;
 
   // Strip ":...", which is the location
@@ -98,14 +98,15 @@
     return false;
   }
   if (IsZipMagic(magic)) {
-    std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd.release(), filename, error_msg));
-    if (zip_archive.get() == NULL) {
+    std::unique_ptr<ZipArchive> zip_archive(
+        ZipArchive::OpenFromFd(fd.release(), filename, error_msg));
+    if (zip_archive.get() == nullptr) {
       *error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", file_part,
                                 error_msg->c_str());
       return false;
     }
     std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name, error_msg));
-    if (zip_entry.get() == NULL) {
+    if (zip_entry.get() == nullptr) {
       *error_msg = StringPrintf("Zip archive '%s' doesn't contain %s (error msg: %s)", file_part,
                                 zip_entry_name, error_msg->c_str());
       return false;
@@ -114,8 +115,9 @@
     return true;
   }
   if (IsDexMagic(magic)) {
-    std::unique_ptr<const DexFile> dex_file(DexFile::OpenFile(fd.release(), filename, false, error_msg));
-    if (dex_file.get() == NULL) {
+    std::unique_ptr<const DexFile> dex_file(
+        DexFile::OpenFile(fd.release(), filename, false, error_msg));
+    if (dex_file.get() == nullptr) {
       return false;
     }
     *checksum = dex_file->GetHeader().checksum_;
@@ -127,7 +129,7 @@
 
 bool DexFile::Open(const char* filename, const char* location, std::string* error_msg,
                    std::vector<std::unique_ptr<const DexFile>>* dex_files) {
-  DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is NULL";
+  DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
   uint32_t magic;
   ScopedFd fd(OpenAndReadMagic(filename, &magic, error_msg));
   if (fd.get() == -1) {
@@ -152,7 +154,7 @@
 }
 
 int DexFile::GetPermissions() const {
-  if (mem_map_.get() == NULL) {
+  if (mem_map_.get() == nullptr) {
     return 0;
   } else {
     return mem_map_->GetProtect();
@@ -165,7 +167,7 @@
 
 bool DexFile::EnableWrite() const {
   CHECK(IsReadOnly());
-  if (mem_map_.get() == NULL) {
+  if (mem_map_.get() == nullptr) {
     return false;
   } else {
     return mem_map_->Protect(PROT_READ | PROT_WRITE);
@@ -174,7 +176,7 @@
 
 bool DexFile::DisableWrite() const {
   CHECK(!IsReadOnly());
-  if (mem_map_.get() == NULL) {
+  if (mem_map_.get() == nullptr) {
     return false;
   } else {
     return mem_map_->Protect(PROT_READ);
@@ -233,7 +235,7 @@
 
 bool DexFile::OpenZip(int fd, const std::string& location, std::string* error_msg,
                       std::vector<std::unique_ptr<const DexFile>>* dex_files) {
-  DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is NULL";
+  DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr";
   std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg));
   if (zip_archive.get() == nullptr) {
     DCHECK(!error_msg->empty());
@@ -260,12 +262,12 @@
                                              ZipOpenErrorCode* error_code) {
   CHECK(!location.empty());
   std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
-  if (zip_entry.get() == NULL) {
+  if (zip_entry.get() == nullptr) {
     *error_code = ZipOpenErrorCode::kEntryNotFound;
     return nullptr;
   }
   std::unique_ptr<MemMap> map(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
-  if (map.get() == NULL) {
+  if (map.get() == nullptr) {
     *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
                               error_msg->c_str());
     *error_code = ZipOpenErrorCode::kExtractToMemoryError;
@@ -297,7 +299,7 @@
 bool DexFile::OpenFromZip(const ZipArchive& zip_archive, const std::string& location,
                           std::string* error_msg,
                           std::vector<std::unique_ptr<const DexFile>>* dex_files) {
-  DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is NULL";
+  DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
   ZipOpenErrorCode error_code;
   std::unique_ptr<const DexFile> dex_file(Open(zip_archive, kClassesDex, location, error_msg,
                                                &error_code));
@@ -371,7 +373,7 @@
       find_class_def_misses_(0),
       class_def_index_(nullptr),
       oat_dex_file_(oat_dex_file) {
-  CHECK(begin_ != NULL) << GetLocation();
+  CHECK(begin_ != nullptr) << GetLocation();
   CHECK_GT(size_, 0U) << GetLocation();
 }
 
@@ -487,7 +489,7 @@
       return &class_def;
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 const DexFile::FieldId* DexFile::FindFieldId(const DexFile::TypeId& declaring_klass,
@@ -522,7 +524,7 @@
       }
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 const DexFile::MethodId* DexFile::FindMethodId(const DexFile::TypeId& declaring_klass,
@@ -557,7 +559,7 @@
       }
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 const DexFile::StringId* DexFile::FindStringId(const char* string) const {
@@ -576,7 +578,7 @@
       return &str_id;
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 const DexFile::StringId* DexFile::FindStringId(const uint16_t* string, size_t length) const {
@@ -595,7 +597,7 @@
       return &str_id;
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 const DexFile::TypeId* DexFile::FindTypeId(uint32_t string_idx) const {
@@ -612,7 +614,7 @@
       return &type_id;
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 const DexFile::ProtoId* DexFile::FindProtoId(uint16_t return_type_idx,
@@ -648,7 +650,7 @@
       return &proto;
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 // Given a signature, place the type ids into the given vector
@@ -687,11 +689,11 @@
     // TODO: avoid creating a std::string just to get a 0-terminated char array
     std::string descriptor(signature.data() + start_offset, offset - start_offset);
     const DexFile::StringId* string_id = FindStringId(descriptor.c_str());
-    if (string_id == NULL) {
+    if (string_id == nullptr) {
       return false;
     }
     const DexFile::TypeId* type_id = FindTypeId(GetIndexForStringId(*string_id));
-    if (type_id == NULL) {
+    if (type_id == nullptr) {
       return false;
     }
     uint16_t type_idx = GetIndexForTypeId(*type_id);
@@ -713,7 +715,7 @@
     return Signature::NoSignature();
   }
   const ProtoId* proto_id = FindProtoId(return_type_idx, param_type_indices);
-  if (proto_id == NULL) {
+  if (proto_id == nullptr) {
     return Signature::NoSignature();
   }
   return Signature(this, *proto_id);
@@ -727,12 +729,12 @@
   }
 
   const CodeItem* code_item = GetCodeItem(method->GetCodeItemOffset());
-  DCHECK(code_item != NULL) << PrettyMethod(method) << " " << GetLocation();
+  DCHECK(code_item != nullptr) << PrettyMethod(method) << " " << GetLocation();
 
   // A method with no line number info should return -1
   LineNumFromPcContext context(rel_pc, -1);
   DecodeDebugInfo(code_item, method->IsStatic(), method->GetDexMethodIndex(), LineNumForPcCb,
-                  NULL, &context);
+                  nullptr, &context);
   return context.line_num_;
 }
 
@@ -771,19 +773,20 @@
 
 void DexFile::DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32_t method_idx,
                                DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
-                               void* context, const uint8_t* stream, LocalInfo* local_in_reg) const {
+                               void* context, const uint8_t* stream, LocalInfo* local_in_reg)
+    const {
   uint32_t line = DecodeUnsignedLeb128(&stream);
   uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
   uint16_t arg_reg = code_item->registers_size_ - code_item->ins_size_;
   uint32_t address = 0;
-  bool need_locals = (local_cb != NULL);
+  bool need_locals = (local_cb != nullptr);
 
   if (!is_static) {
     if (need_locals) {
       const char* descriptor = GetMethodDeclaringClassDescriptor(GetMethodId(method_idx));
       local_in_reg[arg_reg].name_ = "this";
       local_in_reg[arg_reg].descriptor_ = descriptor;
-      local_in_reg[arg_reg].signature_ = NULL;
+      local_in_reg[arg_reg].signature_ = nullptr;
       local_in_reg[arg_reg].start_address_ = 0;
       local_in_reg[arg_reg].is_live_ = true;
     }
@@ -803,7 +806,7 @@
       const char* name = StringDataByIdx(id);
       local_in_reg[arg_reg].name_ = name;
       local_in_reg[arg_reg].descriptor_ = descriptor;
-      local_in_reg[arg_reg].signature_ = NULL;
+      local_in_reg[arg_reg].signature_ = nullptr;
       local_in_reg[arg_reg].start_address_ = address;
       local_in_reg[arg_reg].is_live_ = true;
     }
@@ -895,7 +898,7 @@
         }
 
         if (need_locals) {
-          if (local_in_reg[reg].name_ == NULL || local_in_reg[reg].descriptor_ == NULL) {
+          if (local_in_reg[reg].name_ == nullptr || local_in_reg[reg].descriptor_ == nullptr) {
             LOG(ERROR) << "invalid stream - no name or descriptor in " << GetLocation();
             return;
           }
@@ -920,7 +923,7 @@
         address += adjopcode / DBG_LINE_RANGE;
         line += DBG_LINE_BASE + (adjopcode % DBG_LINE_RANGE);
 
-        if (position_cb != NULL) {
+        if (position_cb != nullptr) {
           if (position_cb(context, address, line)) {
             // early exit
             return;
@@ -937,14 +940,16 @@
                               void* context) const {
   DCHECK(code_item != nullptr);
   const uint8_t* stream = GetDebugInfoStream(code_item);
-  std::unique_ptr<LocalInfo[]> local_in_reg(local_cb != NULL ?
+  std::unique_ptr<LocalInfo[]> local_in_reg(local_cb != nullptr ?
                                       new LocalInfo[code_item->registers_size_] :
-                                      NULL);
-  if (stream != NULL) {
-    DecodeDebugInfo0(code_item, is_static, method_idx, position_cb, local_cb, context, stream, &local_in_reg[0]);
+                                      nullptr);
+  if (stream != nullptr) {
+    DecodeDebugInfo0(code_item, is_static, method_idx, position_cb, local_cb, context, stream,
+                     &local_in_reg[0]);
   }
   for (int reg = 0; reg < code_item->registers_size_; reg++) {
-    InvokeLocalCbIfLive(context, reg, code_item->insns_size_in_code_units_, &local_in_reg[0], local_cb);
+    InvokeLocalCbIfLive(context, reg, code_item->insns_size_in_code_units_, &local_in_reg[0],
+                        local_cb);
   }
 }
 
@@ -1051,7 +1056,7 @@
 
 // Decodes the header section from the class data bytes.
 void ClassDataItemIterator::ReadClassDataHeader() {
-  CHECK(ptr_pos_ != NULL);
+  CHECK(ptr_pos_ != nullptr);
   header_.static_fields_size_ = DecodeUnsignedLeb128(&ptr_pos_);
   header_.instance_fields_size_ = DecodeUnsignedLeb128(&ptr_pos_);
   header_.direct_methods_size_ = DecodeUnsignedLeb128(&ptr_pos_);
@@ -1129,17 +1134,16 @@
   return val;
 }
 
-EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(const DexFile& dex_file,
-                                                                 Handle<mirror::DexCache>* dex_cache,
-                                                                 Handle<mirror::ClassLoader>* class_loader,
-                                                                 ClassLinker* linker,
-                                                                 const DexFile::ClassDef& class_def)
+EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(
+    const DexFile& dex_file, Handle<mirror::DexCache>* dex_cache,
+    Handle<mirror::ClassLoader>* class_loader, ClassLinker* linker,
+    const DexFile::ClassDef& class_def)
     : dex_file_(dex_file), dex_cache_(dex_cache), class_loader_(class_loader), linker_(linker),
       array_size_(), pos_(-1), type_(kByte) {
   DCHECK(dex_cache != nullptr);
   DCHECK(class_loader != nullptr);
   ptr_ = dex_file.GetEncodedStaticFieldValuesArray(class_def);
-  if (ptr_ == NULL) {
+  if (ptr_ == nullptr) {
     array_size_ = 0;
   } else {
     array_size_ = DecodeUnsignedLeb128(&ptr_);
@@ -1199,7 +1203,7 @@
     UNIMPLEMENTED(FATAL) << ": type " << type_;
     UNREACHABLE();
   case kNull:
-    jval_.l = NULL;
+    jval_.l = nullptr;
     width = 0;
     break;
   default:
@@ -1212,7 +1216,8 @@
 template<bool kTransactionActive>
 void EncodedStaticFieldValueIterator::ReadValueToField(ArtField* field) const {
   switch (type_) {
-    case kBoolean: field->SetBoolean<kTransactionActive>(field->GetDeclaringClass(), jval_.z); break;
+    case kBoolean: field->SetBoolean<kTransactionActive>(field->GetDeclaringClass(), jval_.z);
+        break;
     case kByte:    field->SetByte<kTransactionActive>(field->GetDeclaringClass(), jval_.b); break;
     case kShort:   field->SetShort<kTransactionActive>(field->GetDeclaringClass(), jval_.s); break;
     case kChar:    field->SetChar<kTransactionActive>(field->GetDeclaringClass(), jval_.c); break;
@@ -1220,7 +1225,7 @@
     case kLong:    field->SetLong<kTransactionActive>(field->GetDeclaringClass(), jval_.j); break;
     case kFloat:   field->SetFloat<kTransactionActive>(field->GetDeclaringClass(), jval_.f); break;
     case kDouble:  field->SetDouble<kTransactionActive>(field->GetDeclaringClass(), jval_.d); break;
-    case kNull:    field->SetObject<kTransactionActive>(field->GetDeclaringClass(), NULL); break;
+    case kNull:    field->SetObject<kTransactionActive>(field->GetDeclaringClass(), nullptr); break;
     case kString: {
       mirror::String* resolved = linker_->ResolveString(dex_file_, jval_.i, *dex_cache_);
       field->SetObject<kTransactionActive>(field->GetDeclaringClass(), resolved);
@@ -1275,7 +1280,7 @@
     Init(DexFile::GetCatchHandlerData(code_item, offset));
   } else {
     // Not found, initialize as empty
-    current_data_ = NULL;
+    current_data_ = nullptr;
     remaining_count_ = -1;
     catch_all_ = false;
     DCHECK(!HasNext());
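
GetChecksum and Open above both start by sniffing the first word of the file and dispatching on container type. A minimal sketch of that dispatch (SniffMagic/FileKind are hypothetical names):

#include <cstdint>
#include <cstring>

enum class FileKind { kZip, kDex, kUnknown };

FileKind SniffMagic(const uint8_t* magic) {  // Caller guarantees >= 4 readable bytes.
  if (std::memcmp(magic, "PK\x03\x04", 4) == 0) return FileKind::kZip;  // Zip archive.
  if (std::memcmp(magic, "dex\n", 4) == 0) return FileKind::kDex;       // Raw dex header.
  return FileKind::kUnknown;
}
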
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 5bdd9b6..0d07358 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -394,7 +394,7 @@
                                              uint32_t location_checksum,
                                              const OatDexFile* oat_dex_file,
                                              std::string* error_msg) {
-    return OpenMemory(base, size, location, location_checksum, NULL, oat_dex_file, error_msg);
+    return OpenMemory(base, size, location, location_checksum, nullptr, oat_dex_file, error_msg);
   }
 
   // Open all classesXXX.dex files from a zip archive.
@@ -448,7 +448,7 @@
   }
 
   const Header& GetHeader() const {
-    DCHECK(header_ != NULL) << GetLocation();
+    DCHECK(header_ != nullptr) << GetLocation();
     return *header_;
   }
 
@@ -463,7 +463,7 @@
 
   // Returns the number of string identifiers in the .dex file.
   size_t NumStringIds() const {
-    DCHECK(header_ != NULL) << GetLocation();
+    DCHECK(header_ != nullptr) << GetLocation();
     return header_->string_ids_size_;
   }
 
@@ -495,7 +495,7 @@
   const char* StringDataAndUtf16LengthByIdx(uint32_t idx, uint32_t* utf16_length) const {
     if (idx == kDexNoIndex) {
       *utf16_length = 0;
-      return NULL;
+      return nullptr;
     }
     const StringId& string_id = GetStringId(idx);
     return GetStringDataAndUtf16Length(string_id, utf16_length);
@@ -514,7 +514,7 @@
 
   // Returns the number of type identifiers in the .dex file.
   uint32_t NumTypeIds() const {
-    DCHECK(header_ != NULL) << GetLocation();
+    DCHECK(header_ != nullptr) << GetLocation();
     return header_->type_ids_size_;
   }
 
@@ -553,7 +553,7 @@
 
   // Returns the number of field identifiers in the .dex file.
   size_t NumFieldIds() const {
-    DCHECK(header_ != NULL) << GetLocation();
+    DCHECK(header_ != nullptr) << GetLocation();
     return header_->field_ids_size_;
   }
 
@@ -593,7 +593,7 @@
 
   // Returns the number of method identifiers in the .dex file.
   size_t NumMethodIds() const {
-    DCHECK(header_ != NULL) << GetLocation();
+    DCHECK(header_ != nullptr) << GetLocation();
     return header_->method_ids_size_;
   }
 
@@ -643,7 +643,7 @@
   }
   // Returns the number of class definitions in the .dex file.
   uint32_t NumClassDefs() const {
-    DCHECK(header_ != NULL) << GetLocation();
+    DCHECK(header_ != nullptr) << GetLocation();
     return header_->class_defs_size_;
   }
 
@@ -673,7 +673,7 @@
 
   const TypeList* GetInterfacesList(const ClassDef& class_def) const {
     if (class_def.interfaces_off_ == 0) {
-        return NULL;
+        return nullptr;
     } else {
       const uint8_t* addr = begin_ + class_def.interfaces_off_;
       return reinterpret_cast<const TypeList*>(addr);
@@ -683,7 +683,7 @@
   // Returns a pointer to the raw memory mapped class_data_item
   const uint8_t* GetClassData(const ClassDef& class_def) const {
     if (class_def.class_data_off_ == 0) {
-      return NULL;
+      return nullptr;
     } else {
       return begin_ + class_def.class_data_off_;
     }
@@ -692,7 +692,7 @@
   //
   const CodeItem* GetCodeItem(const uint32_t code_off) const {
     if (code_off == 0) {
-      return NULL;  // native or abstract method
+      return nullptr;  // native or abstract method
     } else {
       const uint8_t* addr = begin_ + code_off;
       return reinterpret_cast<const CodeItem*>(addr);
@@ -705,7 +705,7 @@
 
   // Returns the number of prototype identifiers in the .dex file.
   size_t NumProtoIds() const {
-    DCHECK(header_ != NULL) << GetLocation();
+    DCHECK(header_ != nullptr) << GetLocation();
     return header_->proto_ids_size_;
   }
 
@@ -745,7 +745,7 @@
 
   const TypeList* GetProtoParameters(const ProtoId& proto_id) const {
     if (proto_id.parameters_off_ == 0) {
-      return NULL;
+      return nullptr;
     } else {
       const uint8_t* addr = begin_ + proto_id.parameters_off_;
       return reinterpret_cast<const TypeList*>(addr);
@@ -778,7 +778,7 @@
   // Get the pointer to the start of the debugging data
   const uint8_t* GetDebugInfoStream(const CodeItem* code_item) const {
     if (code_item->debug_info_off_ == 0) {
-      return NULL;
+      return nullptr;
     } else {
       return begin_ + code_item->debug_info_off_;
     }
@@ -818,7 +818,8 @@
 
   struct LocalInfo {
     LocalInfo()
-        : name_(NULL), descriptor_(NULL), signature_(NULL), start_address_(0), is_live_(false) {}
+        : name_(nullptr), descriptor_(nullptr), signature_(nullptr), start_address_(0),
+          is_live_(false) {}
 
     const char* name_;  // E.g., list
     const char* descriptor_;  // E.g., Ljava/util/LinkedList;
@@ -841,10 +842,10 @@
 
   void InvokeLocalCbIfLive(void* context, int reg, uint32_t end_address,
                            LocalInfo* local_in_reg, DexDebugNewLocalCb local_cb) const {
-    if (local_cb != NULL && local_in_reg[reg].is_live_) {
+    if (local_cb != nullptr && local_in_reg[reg].is_live_) {
       local_cb(context, reg, local_in_reg[reg].start_address_, end_address,
           local_in_reg[reg].name_, local_in_reg[reg].descriptor_,
-          local_in_reg[reg].signature_ != NULL ? local_in_reg[reg].signature_ : "");
+          local_in_reg[reg].signature_ != nullptr ? local_in_reg[reg].signature_ : "");
     }
   }
 
@@ -865,7 +866,7 @@
 
   const char* GetSourceFile(const ClassDef& class_def) const {
     if (class_def.source_file_idx_ == 0xffffffff) {
-      return NULL;
+      return nullptr;
     } else {
       return StringDataByIdx(class_def.source_file_idx_);
     }
@@ -926,7 +927,7 @@
     kVerifyError
   };
 
-  // Opens .dex file from the entry_name in a zip archive. error_code is undefined when non-nullptr
+  // Opens .dex file from the entry_name in a zip archive. error_code is undefined on a non-null
   // return.
   static std::unique_ptr<const DexFile> Open(const ZipArchive& zip_archive, const char* entry_name,
                                              const std::string& location, std::string* error_msg,
@@ -1055,7 +1056,7 @@
   DexFileParameterIterator(const DexFile& dex_file, const DexFile::ProtoId& proto_id)
       : dex_file_(dex_file), size_(0), pos_(0) {
     type_list_ = dex_file_.GetProtoParameters(proto_id);
-    if (type_list_ != NULL) {
+    if (type_list_ != nullptr) {
       size_ = type_list_->Size();
     }
   }
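
A note on the hunks above: these DexFile accessors share a convention where a
zero offset (or the 0xffffffff sentinel in GetSourceFile) means "absent", and
the getter returns nullptr instead of a pointer into the mapped file. A
minimal sketch of that convention, with illustrative names that are not part
of the ART API:

  #include <cstdint>

  // Sketch only: map a raw file offset to a typed pointer, treating offset 0
  // as "item not present", as GetClassData() and GetCodeItem() do above.
  template <typename T>
  const T* OffsetToPointer(const uint8_t* begin, uint32_t offset) {
    return offset == 0 ? nullptr : reinterpret_cast<const T*>(begin + offset);
  }
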
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 09ef3ee..4d099e1 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -33,7 +33,7 @@
 TEST_F(DexFileTest, Open) {
   ScopedObjectAccess soa(Thread::Current());
   std::unique_ptr<const DexFile> dex(OpenTestDexFile("Nested"));
-  ASSERT_TRUE(dex.get() != NULL);
+  ASSERT_TRUE(dex.get() != nullptr);
 }
 
 static const uint8_t kBase64Map[256] = {
@@ -136,14 +136,14 @@
 static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
                                                         const char* location) {
   // decode base64
-  CHECK(base64 != NULL);
+  CHECK(base64 != nullptr);
   size_t length;
   std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(base64, &length));
-  CHECK(dex_bytes.get() != NULL);
+  CHECK(dex_bytes.get() != nullptr);
 
   // write to provided file
   std::unique_ptr<File> file(OS::CreateEmptyFile(location));
-  CHECK(file.get() != NULL);
+  CHECK(file.get() != nullptr);
   if (!file->WriteFully(dex_bytes.get(), length)) {
     PLOG(FATAL) << "Failed to write base64 as dex file";
   }
@@ -168,7 +168,7 @@
 TEST_F(DexFileTest, Header) {
   ScratchFile tmp;
   std::unique_ptr<const DexFile> raw(OpenDexFileBase64(kRawDex, tmp.GetFilename().c_str()));
-  ASSERT_TRUE(raw.get() != NULL);
+  ASSERT_TRUE(raw.get() != nullptr);
 
   const DexFile::Header& header = raw->GetHeader();
   // TODO: header.magic_
@@ -234,7 +234,7 @@
   ASSERT_STREQ("LGetMethodSignature;", raw->GetClassDescriptor(class_def));
 
   const uint8_t* class_data = raw->GetClassData(class_def);
-  ASSERT_TRUE(class_data != NULL);
+  ASSERT_TRUE(class_data != nullptr);
   ClassDataItemIterator it(*raw, class_data);
 
   EXPECT_EQ(1u, it.NumDirectMethods());
@@ -281,8 +281,8 @@
   EXPECT_EQ(1U, raw->NumClassDefs());
 
   const char* strings[] = { "LGetMethodSignature;", "Ljava/lang/Float;", "Ljava/lang/Object;",
-      "D", "I", "J", NULL };
-  for (size_t i = 0; strings[i] != NULL; i++) {
+      "D", "I", "J", nullptr };
+  for (size_t i = 0; strings[i] != nullptr; i++) {
     const char* str = strings[i];
     const DexFile::StringId* str_id = raw->FindStringId(str);
     const char* dex_str = raw->GetStringData(*str_id);
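
The strings array above keeps a nullptr sentinel so the loop knows where to
stop. As a hedged aside, a range-based for over a braced list needs no
sentinel at all; a self-contained sketch:

  #include <cstdio>
  #include <initializer_list>

  int main() {
    for (const char* str : {"D", "I", "J"}) {
      std::printf("%s\n", str);  // a FindStringId lookup would go here
    }
    return 0;
  }
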
@@ -294,10 +294,10 @@
   for (size_t i = 0; i < java_lang_dex_file_->NumTypeIds(); i++) {
     const char* type_str = java_lang_dex_file_->StringByTypeIdx(i);
     const DexFile::StringId* type_str_id = java_lang_dex_file_->FindStringId(type_str);
-    ASSERT_TRUE(type_str_id != NULL);
+    ASSERT_TRUE(type_str_id != nullptr);
     uint32_t type_str_idx = java_lang_dex_file_->GetIndexForStringId(*type_str_id);
     const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(type_str_idx);
-    ASSERT_TRUE(type_id != NULL);
+    ASSERT_TRUE(type_id != nullptr);
     EXPECT_EQ(java_lang_dex_file_->GetIndexForTypeId(*type_id), i);
   }
 }
@@ -307,14 +307,14 @@
     const DexFile::ProtoId& to_find = java_lang_dex_file_->GetProtoId(i);
     const DexFile::TypeList* to_find_tl = java_lang_dex_file_->GetProtoParameters(to_find);
     std::vector<uint16_t> to_find_types;
-    if (to_find_tl != NULL) {
+    if (to_find_tl != nullptr) {
       for (size_t j = 0; j < to_find_tl->Size(); j++) {
         to_find_types.push_back(to_find_tl->GetTypeItem(j).type_idx_);
       }
     }
     const DexFile::ProtoId* found =
         java_lang_dex_file_->FindProtoId(to_find.return_type_idx_, to_find_types);
-    ASSERT_TRUE(found != NULL);
+    ASSERT_TRUE(found != nullptr);
     EXPECT_EQ(java_lang_dex_file_->GetIndexForProtoId(*found), i);
   }
 }
@@ -326,7 +326,7 @@
     const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
     const DexFile::ProtoId& signature = java_lang_dex_file_->GetProtoId(to_find.proto_idx_);
     const DexFile::MethodId* found = java_lang_dex_file_->FindMethodId(klass, name, signature);
-    ASSERT_TRUE(found != NULL) << "Didn't find method " << i << ": "
+    ASSERT_TRUE(found != nullptr) << "Didn't find method " << i << ": "
         << java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
         << java_lang_dex_file_->GetStringData(name)
         << java_lang_dex_file_->GetMethodSignature(to_find);
@@ -341,7 +341,7 @@
     const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
     const DexFile::TypeId& type = java_lang_dex_file_->GetTypeId(to_find.type_idx_);
     const DexFile::FieldId* found = java_lang_dex_file_->FindFieldId(klass, name, type);
-    ASSERT_TRUE(found != NULL) << "Didn't find field " << i << ": "
+    ASSERT_TRUE(found != nullptr) << "Didn't find field " << i << ": "
         << java_lang_dex_file_->StringByTypeIdx(to_find.type_idx_) << " "
         << java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
         << java_lang_dex_file_->GetStringData(name);
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index a3f3de8..2603975 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -1473,7 +1473,7 @@
   }
 
   // Check ordering between items.
-  if (previous_item_ != NULL) {
+  if (previous_item_ != nullptr) {
     const DexFile::StringId* prev_item = reinterpret_cast<const DexFile::StringId*>(previous_item_);
     const char* prev_str = dex_file_->GetStringData(*prev_item);
     const char* str = dex_file_->GetStringData(*item);
@@ -1499,7 +1499,7 @@
   }
 
   // Check ordering between items.
-  if (previous_item_ != NULL) {
+  if (previous_item_ != nullptr) {
     const DexFile::TypeId* prev_item = reinterpret_cast<const DexFile::TypeId*>(previous_item_);
     if (UNLIKELY(prev_item->descriptor_idx_ >= item->descriptor_idx_)) {
       ErrorStringPrintf("Out-of-order type_ids: %x then %x",
@@ -1548,7 +1548,7 @@
   }
 
   // Check ordering between items. This relies on type_ids being in order.
-  if (previous_item_ != NULL) {
+  if (previous_item_ != nullptr) {
     const DexFile::ProtoId* prev = reinterpret_cast<const DexFile::ProtoId*>(previous_item_);
     if (UNLIKELY(prev->return_type_idx_ > item->return_type_idx_)) {
       ErrorStringPrintf("Out-of-order proto_id return types");
@@ -1610,7 +1610,7 @@
   }
 
   // Check ordering between items. This relies on the other sections being in order.
-  if (previous_item_ != NULL) {
+  if (previous_item_ != nullptr) {
     const DexFile::FieldId* prev_item = reinterpret_cast<const DexFile::FieldId*>(previous_item_);
     if (UNLIKELY(prev_item->class_idx_ > item->class_idx_)) {
       ErrorStringPrintf("Out-of-order field_ids");
@@ -1657,7 +1657,7 @@
   }
 
   // Check ordering between items. This relies on the other sections being in order.
-  if (previous_item_ != NULL) {
+  if (previous_item_ != nullptr) {
     const DexFile::MethodId* prev_item = reinterpret_cast<const DexFile::MethodId*>(previous_item_);
     if (UNLIKELY(prev_item->class_idx_ > item->class_idx_)) {
       ErrorStringPrintf("Out-of-order method_ids");
@@ -1728,7 +1728,7 @@
   }
 
   const DexFile::TypeList* interfaces = dex_file_->GetInterfacesList(*item);
-  if (interfaces != NULL) {
+  if (interfaces != nullptr) {
     uint32_t size = interfaces->Size();
 
     // Ensure that all interfaces refer to classes (not arrays or primitives).
@@ -1952,7 +1952,7 @@
   }
 
   // Iterate through the items in the section.
-  previous_item_ = NULL;
+  previous_item_ = nullptr;
   for (uint32_t i = 0; i < count; i++) {
     uint32_t new_offset = (offset + alignment_mask) & ~alignment_mask;
     ptr_ = begin_ + new_offset;
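
Each verifier pass above resets previous_item_ to nullptr and then checks
every item against its predecessor. A sketch of that remember-the-previous
pattern, detached from the DexFile types (names are illustrative):

  #include <cstdint>
  #include <vector>

  // Sketch only: nullptr means "no previous item yet", exactly the role
  // previous_item_ plays in the ordering checks above.
  bool CheckOrdered(const std::vector<uint32_t>& keys) {
    const uint32_t* previous = nullptr;
    for (const uint32_t& key : keys) {
      if (previous != nullptr && *previous >= key) {
        return false;  // out-of-order item
      }
      previous = &key;
    }
    return true;
  }
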
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 18bf2e7..877dfc2 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -36,7 +36,7 @@
  private:
   DexFileVerifier(const DexFile* dex_file, const uint8_t* begin, size_t size, const char* location)
       : dex_file_(dex_file), begin_(begin), size_(size), location_(location),
-        header_(&dex_file->GetHeader()), ptr_(NULL), previous_item_(NULL)  {
+        header_(&dex_file->GetHeader()), ptr_(nullptr), previous_item_(nullptr)  {
   }
 
   bool Verify();
@@ -99,12 +99,12 @@
   bool CheckInterSection();
 
   // Load a string by (type) index. Checks whether the index is in bounds, printing the error if
-  // not. If there is an error, nullptr is returned.
+  // not. If there is an error, null is returned.
   const char* CheckLoadStringByIdx(uint32_t idx, const char* error_fmt);
   const char* CheckLoadStringByTypeIdx(uint32_t type_idx, const char* error_fmt);
 
   // Load a field/method Id by index. Checks whether the index is in bounds, printing the error if
-  // not. If there is an error, nullptr is returned.
+  // not. If there is an error, null is returned.
   const DexFile::FieldId* CheckLoadFieldId(uint32_t idx, const char* error_fmt);
   const DexFile::MethodId* CheckLoadMethodId(uint32_t idx, const char* error_fmt);
 
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 00ca8a9..95a47cc 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -105,14 +105,14 @@
                                                         const char* location,
                                                         std::string* error_msg) {
   // decode base64
-  CHECK(base64 != NULL);
+  CHECK(base64 != nullptr);
   size_t length;
   std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(base64, &length));
-  CHECK(dex_bytes.get() != NULL);
+  CHECK(dex_bytes.get() != nullptr);
 
   // write to provided file
   std::unique_ptr<File> file(OS::CreateEmptyFile(location));
-  CHECK(file.get() != NULL);
+  CHECK(file.get() != nullptr);
   if (!file->WriteFully(dex_bytes.get(), length)) {
     PLOG(FATAL) << "Failed to write base64 as dex file";
   }
@@ -178,7 +178,7 @@
 
   // write to provided file
   std::unique_ptr<File> file(OS::CreateEmptyFile(location));
-  CHECK(file.get() != NULL);
+  CHECK(file.get() != nullptr);
   if (!file->WriteFully(bytes, length)) {
     PLOG(FATAL) << "Failed to write base64 as dex file";
   }
@@ -205,7 +205,7 @@
   // Decode base64.
   size_t length;
   std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(kGoodTestDex, &length));
-  CHECK(dex_bytes.get() != NULL);
+  CHECK(dex_bytes.get() != nullptr);
 
   // Make modifications.
   dex_bytes.get()[offset] = new_val;
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index d3b9eb4..c64c21e 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -183,7 +183,7 @@
 
   // Reads an instruction out of the stream at the specified address.
   static const Instruction* At(const uint16_t* code) {
-    DCHECK(code != NULL);
+    DCHECK(code != nullptr);
     return reinterpret_cast<const Instruction*>(code);
   }
 
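Instruction::At() above overlays a typed, read-only view on the raw code
stream without copying it. A minimal sketch of the same flyweight idiom;
Packet is an illustrative stand-in, not an ART type:

  #include <cassert>
  #include <cstdint>

  struct Packet {
    uint16_t first_unit;

    // Reinterpret the stream in place, as Instruction::At() does above.
    static const Packet* At(const uint16_t* code) {
      assert(code != nullptr);  // mirrors the DCHECK
      return reinterpret_cast<const Packet*>(code);
    }
  };
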
diff --git a/runtime/dex_method_iterator.h b/runtime/dex_method_iterator.h
index 14e316f..7fae277 100644
--- a/runtime/dex_method_iterator.h
+++ b/runtime/dex_method_iterator.h
@@ -30,8 +30,8 @@
         found_next_(false),
         dex_file_index_(0),
         class_def_index_(0),
-        class_def_(NULL),
-        class_data_(NULL),
+        class_def_(nullptr),
+        class_data_(nullptr),
         direct_method_(false) {
     CHECK_NE(0U, dex_files_.size());
   }
@@ -51,20 +51,20 @@
         dex_file_index_++;
         continue;
       }
-      if (class_def_ == NULL) {
+      if (class_def_ == nullptr) {
         class_def_ = &GetDexFileInternal().GetClassDef(class_def_index_);
       }
-      if (class_data_ == NULL) {
+      if (class_data_ == nullptr) {
         class_data_ = GetDexFileInternal().GetClassData(*class_def_);
-        if (class_data_ == NULL) {
+        if (class_data_ == nullptr) {
           // empty class, such as a marker interface
           // End of this class, advance and retry.
-          class_def_ = NULL;
+          class_def_ = nullptr;
           class_def_index_++;
           continue;
         }
       }
-      if (it_.get() == NULL) {
+      if (it_.get() == nullptr) {
         it_.reset(new ClassDataItemIterator(GetDexFileInternal(), class_data_));
         // Skip fields
         while (GetIterator().HasNextStaticField()) {
@@ -88,16 +88,16 @@
       }
       // End of this class, advance and retry.
       DCHECK(!GetIterator().HasNext());
-      it_.reset(NULL);
-      class_data_ = NULL;
-      class_def_ = NULL;
+      it_.reset(nullptr);
+      class_data_ = nullptr;
+      class_def_ = nullptr;
       class_def_index_++;
     }
   }
 
   void Next() {
     found_next_ = false;
-    if (it_.get() != NULL) {
+    if (it_.get() != nullptr) {
       // Advance to next method if we currently are looking at a class.
       GetIterator().Next();
     }
@@ -115,20 +115,20 @@
 
   InvokeType GetInvokeType() {
     CHECK(HasNext());
-    CHECK(class_def_ != NULL);
+    CHECK(class_def_ != nullptr);
     return GetIterator().GetMethodInvokeType(*class_def_);
   }
 
  private:
   ClassDataItemIterator& GetIterator() const {
-    CHECK(it_.get() != NULL);
+    CHECK(it_.get() != nullptr);
     return *it_.get();
   }
 
   const DexFile& GetDexFileInternal() const {
     CHECK_LT(dex_file_index_, dex_files_.size());
     const DexFile* dex_file = dex_files_[dex_file_index_];
-    CHECK(dex_file != NULL);
+    CHECK(dex_file != nullptr);
     return *dex_file;
   }
 
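A side note on it_.reset(nullptr) above: for a std::unique_ptr, reset(nullptr)
and reset() are equivalent, so either spelling would do. A short illustration:

  #include <memory>

  int main() {
    std::unique_ptr<int> p(new int(42));
    p.reset(nullptr);  // same effect as p.reset(): delete and null out
    return p == nullptr ? 0 : 1;
  }
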
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 8969e29..e909e64 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1026,13 +1026,13 @@
       return dyn;
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 template <typename ElfTypes>
 typename ElfTypes::Word ElfFileImpl<ElfTypes>::FindDynamicValueByType(Elf_Sword type) const {
   Elf_Dyn* dyn = FindDynamicByType(type);
-  if (dyn == NULL) {
+  if (dyn == nullptr) {
     return 0;
   } else {
     return dyn->d_un.d_val;
@@ -1567,7 +1567,7 @@
   CHECK_NE(0U, section_headers.size());
   CHECK_EQ(section_headers.size(), section_headers_original_indexes.size());
 
-  // section 0 is the NULL section, sections start at offset of first section
+  // section 0 is the null section; sections start at the offset of the first section
   CHECK(GetSectionHeader(1) != nullptr);
   Elf_Off offset = GetSectionHeader(1)->sh_offset;
   for (size_t i = 1; i < section_headers.size(); i++) {
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index 047849a..80950c6 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -94,8 +94,7 @@
                              const std::string& symbol_name,
                              bool build_map);
 
-  // Lookup a string given string section and offset. Returns nullptr for
-  // special 0 offset.
+  // Lookup a string given string section and offset. Returns null for special 0 offset.
   const char* GetString(Elf_Shdr&, Elf_Word) const;
 
   Elf_Word GetDynamicNum() const;
@@ -167,7 +166,7 @@
   // Check whether the offset is in range, and set to target to Begin() + offset if OK.
   bool CheckAndSet(Elf32_Off offset, const char* label, uint8_t** target, std::string* error_msg);
 
-  // Find symbol in specified table, returning nullptr if it is not found.
+  // Find symbol in specified table, returning null if it is not found.
   //
   // If build_map is true, builds a map to speed repeated access. The
   // map does not include untyped symbol values (aka STT_NOTYPE)
@@ -184,7 +183,7 @@
   Elf_Dyn* FindDynamicByType(Elf_Sword type) const;
   Elf_Word FindDynamicValueByType(Elf_Sword type) const;
 
-  // Lookup a string by section type. Returns nullptr for special 0 offset.
+  // Lookup a string by section type. Returns null for special 0 offset.
   const char* GetString(Elf_Word section_type, Elf_Word) const;
 
   const File* const file_;
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index cbfba12..64b7ecd 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -41,10 +41,10 @@
                                        mirror::ArtMethod* method,
                                        Thread* self, bool* slow_path) {
   mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx);
-  if (UNLIKELY(klass == NULL)) {
+  if (UNLIKELY(klass == nullptr)) {
     klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
     *slow_path = true;
-    if (klass == NULL) {
+    if (klass == nullptr) {
       DCHECK(self->IsExceptionPending());
       return nullptr;  // Failure
     } else {
@@ -526,19 +526,19 @@
                                          mirror::Object* this_object,
                                          mirror::ArtMethod* referrer,
                                          bool access_check, InvokeType type) {
-  if (UNLIKELY(this_object == NULL && type != kStatic)) {
-    return NULL;
+  if (UNLIKELY(this_object == nullptr && type != kStatic)) {
+    return nullptr;
   }
   mirror::ArtMethod* resolved_method =
       referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx);
-  if (UNLIKELY(resolved_method == NULL)) {
-    return NULL;
+  if (UNLIKELY(resolved_method == nullptr)) {
+    return nullptr;
   }
   if (access_check) {
     // Check for incompatible class change errors and access.
     bool icce = resolved_method->CheckIncompatibleClassChange(type);
     if (UNLIKELY(icce)) {
-      return NULL;
+      return nullptr;
     }
     mirror::Class* methods_class = resolved_method->GetDeclaringClass();
     mirror::Class* referring_class = referrer->GetDeclaringClass();
@@ -546,7 +546,7 @@
                  !referring_class->CanAccessMember(methods_class,
                                                    resolved_method->GetAccessFlags()))) {
       // Potential illegal access, may need to refine the method's class.
-      return NULL;
+      return nullptr;
     }
   }
   if (type == kInterface) {  // Most common form of slow path dispatch.
@@ -606,7 +606,7 @@
 
 inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
   // Save any pending exception over monitor exit call.
-  mirror::Throwable* saved_exception = NULL;
+  mirror::Throwable* saved_exception = nullptr;
   if (UNLIKELY(self->IsExceptionPending())) {
     saved_exception = self->GetException();
     self->ClearException();
@@ -620,7 +620,7 @@
         << self->GetException()->Dump();
   }
   // Restore pending exception.
-  if (saved_exception != NULL) {
+  if (saved_exception != nullptr) {
     self->SetException(saved_exception);
   }
 }
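
UnlockJniSynchronizedMethod() above saves a pending exception, runs the
monitor exit with the slot cleared, and restores it afterwards. The same
save/clear/restore idea can be sketched as RAII, under assumed names, so the
restore cannot be skipped on an early return:

  #include <utility>

  template <typename T>
  class ScopedSaveAndClear {
   public:
    explicit ScopedSaveAndClear(T* slot)
        : slot_(slot), saved_(std::move(*slot)) {
      *slot_ = T{};  // run the scope with the slot cleared
    }
    ~ScopedSaveAndClear() { *slot_ = std::move(saved_); }  // restore on exit
   private:
    T* slot_;
    T saved_;
  };
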
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 768f505..ce56739 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -44,9 +44,9 @@
     return nullptr;  // Failure
   }
   mirror::Class* klass = referrer->GetDexCacheResolvedType<false>(type_idx);
-  if (UNLIKELY(klass == NULL)) {  // Not in dex cache so try to resolve
+  if (UNLIKELY(klass == nullptr)) {  // Not in dex cache so try to resolve
     klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer);
-    if (klass == NULL) {  // Error
+    if (klass == nullptr) {  // Error
       DCHECK(self->IsExceptionPending());
       return nullptr;  // Failure
     }
@@ -231,13 +231,13 @@
 
   // Build argument array possibly triggering GC.
   soa.Self()->AssertThreadSuspensionIsAllowable();
-  jobjectArray args_jobj = NULL;
+  jobjectArray args_jobj = nullptr;
   const JValue zero;
   int32_t target_sdk_version = Runtime::Current()->GetTargetSdkVersion();
   // Do not create empty arrays unless needed to maintain Dalvik bug compatibility.
   if (args.size() > 0 || (target_sdk_version > 0 && target_sdk_version <= 21)) {
-    args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, NULL);
-    if (args_jobj == NULL) {
+    args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, nullptr);
+    if (args_jobj == nullptr) {
       CHECK(soa.Self()->IsExceptionPending());
       return zero;
     }
@@ -249,7 +249,7 @@
         JValue jv;
         jv.SetJ(args.at(i).j);
         mirror::Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv);
-        if (val == NULL) {
+        if (val == nullptr) {
           CHECK(soa.Self()->IsExceptionPending());
           return zero;
         }
@@ -270,7 +270,7 @@
 
   // Unbox result and handle error conditions.
   if (LIKELY(!soa.Self()->IsExceptionPending())) {
-    if (shorty[0] == 'V' || (shorty[0] == 'L' && result == NULL)) {
+    if (shorty[0] == 'V' || (shorty[0] == 'L' && result == nullptr)) {
       // Do nothing.
       return zero;
     } else {
@@ -315,7 +315,7 @@
       }
       if (!declares_exception) {
         soa.Self()->ThrowNewWrappedException("Ljava/lang/reflect/UndeclaredThrowableException;",
-                                             NULL);
+                                             nullptr);
       }
     }
     return zero;
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index 28e19d4..d4844c2 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -47,7 +47,7 @@
       method = shadow_frame->GetMethod();
     }
   }
-  uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
+  uint16_t arg_offset = (code_item == nullptr) ? 0 : code_item->registers_size_ - code_item->ins_size_;
   method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
                  (shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
                  result, method->GetShorty());
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index 2752407..a68eeeb 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -34,15 +34,15 @@
   Locks::mutator_lock_->AssertNotHeld(self);  // We come here as Native.
   ScopedObjectAccess soa(self);
 
-  mirror::ArtMethod* method = self->GetCurrentMethod(NULL);
-  DCHECK(method != NULL);
+  mirror::ArtMethod* method = self->GetCurrentMethod(nullptr);
+  DCHECK(method != nullptr);
 
-  // Lookup symbol address for method, on failure we'll return NULL with an exception set,
+  // Lookup symbol address for method, on failure we'll return null with an exception set,
   // otherwise we return the address of the method we found.
   void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
-  if (native_code == NULL) {
+  if (native_code == nullptr) {
     DCHECK(self->IsExceptionPending());
-    return NULL;
+    return nullptr;
   } else {
     // Register so that future calls don't come here
     method->RegisterNative(native_code, false);
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index a6ab69b..37de380 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -22,8 +22,8 @@
 // Assignable test for code, won't throw.  Null and equality tests already performed
 extern "C" uint32_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  DCHECK(klass != NULL);
-  DCHECK(ref_class != NULL);
+  DCHECK(klass != nullptr);
+  DCHECK(ref_class != nullptr);
   return klass->IsAssignableFrom(ref_class) ? 1 : 0;
 }
 
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 54dbd8c..eb1b105 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -41,7 +41,7 @@
   bool interpreter_entry = (result == GetQuickToInterpreterBridge());
   instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? nullptr : this_object,
                                                  method, lr, interpreter_entry);
-  CHECK(result != NULL) << PrettyMethod(method);
+  CHECK(result != nullptr) << PrettyMethod(method);
   return result;
 }
 
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 9644b98..f22edc1 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -34,10 +34,10 @@
 extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   /*
-   * exception may be NULL, in which case this routine should
+   * exception may be null, in which case this routine should
    * throw an NPE.  NOTE: this is a convenience for generated code,
    * which previously did the null check inline and constructed
-   * and threw a NPE if NULL.  This routine responsible for setting
+   * and threw an NPE if null.  This routine is responsible for setting
    * exception_ in thread and delivering the exception.
    */
   ScopedQuickEntrypointChecks sqec(self);
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 1770658..6808000 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -200,7 +200,7 @@
   fake_stack.push_back(0);
   fake_stack.push_back(0xEBAD6070);  // return pc
 
-  // Push Method* of NULL to terminate the trace
+  // Push Method* of null to terminate the trace
   fake_stack.push_back(0);
 
   // Push null values which will become null incoming arguments.
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 5224d64..399832a 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -238,9 +238,9 @@
     std::string error_msg;
     mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
                                         PROT_READ | PROT_WRITE, false, false, &error_msg));
-    CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack.\n" << error_msg;
+    CHECK(mem_map_.get() != nullptr) << "couldn't allocate mark stack.\n" << error_msg;
     uint8_t* addr = mem_map_->Begin();
-    CHECK(addr != NULL);
+    CHECK(addr != nullptr);
     debug_is_sorted_ = true;
     begin_ = reinterpret_cast<StackReference<T>*>(addr);
     Reset();
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 7879632..1a7b1a3 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -36,7 +36,7 @@
 
 /*
  * Maintain a card table from the write barrier. All writes of
- * non-NULL values to heap addresses should go through an entry in
+ * non-null values to heap addresses should go through an entry in
  * WriteBarrier, and from there to here.
  *
  * The heap is divided into "cards" of GC_CARD_SIZE bytes, as
@@ -44,7 +44,7 @@
  * data per card, to be used by the GC. The value of the byte will be
  * one of GC_CARD_CLEAN or GC_CARD_DIRTY.
  *
- * After any store of a non-NULL object pointer into a heap object,
+ * After any store of a non-null object pointer into a heap object,
  * code is obliged to mark the card dirty. The setters in
  * object.h [such as SetFieldObject] do this for you. The
  * compiler also contains code to mark cards as dirty.
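
The comment above describes the write barrier contract: every store of a
non-null reference dirties the card covering the destination. A sketch of
that card marking; the constants are assumed for illustration, not taken
from CardTable:

  #include <cstdint>

  constexpr uintptr_t kCardShift = 7;   // assumed 128-byte cards
  constexpr uint8_t kCardDirty = 0x70;  // assumed dirty marker value

  // One byte of card table per card; the table pointer is biased so a heap
  // address can be shifted and used as a direct index.
  inline void MarkCard(uint8_t* biased_begin, const void* heap_addr) {
    biased_begin[reinterpret_cast<uintptr_t>(heap_addr) >> kCardShift] =
        kCardDirty;
  }
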
@@ -64,13 +64,13 @@
   std::unique_ptr<MemMap> mem_map(
       MemMap::MapAnonymous("card table", nullptr, capacity + 256, PROT_READ | PROT_WRITE,
                            false, false, &error_msg));
-  CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg;
+  CHECK(mem_map.get() != nullptr) << "couldn't allocate card table: " << error_msg;
   // All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
   // don't clear the card table to avoid unnecessary pages being allocated
   static_assert(kCardClean == 0, "kCardClean must be 0");
 
   uint8_t* cardtable_begin = mem_map->Begin();
-  CHECK(cardtable_begin != NULL);
+  CHECK(cardtable_begin != nullptr);
 
   // We allocated up to a byte's worth of extra space to allow biased_begin's byte value to equal
   // kCardDirty; compute an offset value to make this the case.
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 896cce5..75ef58a 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -43,7 +43,7 @@
 template<size_t kAlignment> class SpaceBitmap;
 
 // Maintain a card table from the write barrier. All writes of
-// non-NULL values to heap addresses should go through an entry in
+// non-null values to heap addresses should go through an entry in
 // WriteBarrier, and from there to here.
 class CardTable {
  public:
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 11347a5..ae91200 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -55,7 +55,7 @@
 inline bool SpaceBitmap<kAlignment>::Test(const mirror::Object* obj) const {
   uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
   DCHECK(HasAddress(obj)) << obj;
-  DCHECK(bitmap_begin_ != NULL);
+  DCHECK(bitmap_begin_ != nullptr);
   DCHECK_GE(addr, heap_begin_);
   const uintptr_t offset = addr - heap_begin_;
   return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
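
Test() above turns a heap offset into a word index plus a bit mask. A sketch
of that arithmetic, assuming kAlignment-byte aligned objects and word-sized
bitmap entries (the constants are illustrative):

  #include <cstddef>
  #include <cstdint>

  constexpr size_t kAlignment = 8;  // assumed object alignment
  constexpr size_t kBitsPerWord = sizeof(uintptr_t) * 8;

  inline size_t OffsetToIndex(uintptr_t offset) {
    return (offset / kAlignment) / kBitsPerWord;
  }
  inline uintptr_t OffsetToMask(uintptr_t offset) {
    return uintptr_t{1} << ((offset / kAlignment) % kBitsPerWord);
  }
  inline bool TestBit(const uintptr_t* bitmap, uintptr_t offset) {
    return (bitmap[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
  }
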
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 2da8325..84dadea 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -104,8 +104,8 @@
 
 template<size_t kAlignment>
 void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
-  CHECK(bitmap_begin_ != NULL);
-  CHECK(callback != NULL);
+  CHECK(bitmap_begin_ != nullptr);
+  CHECK(callback != nullptr);
 
   uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
   uintptr_t* bitmap_begin = bitmap_begin_;
@@ -132,7 +132,7 @@
   CHECK(mark_bitmap.bitmap_begin_ != nullptr);
   CHECK_EQ(live_bitmap.heap_begin_, mark_bitmap.heap_begin_);
   CHECK_EQ(live_bitmap.bitmap_size_, mark_bitmap.bitmap_size_);
-  CHECK(callback != NULL);
+  CHECK(callback != nullptr);
   CHECK_LE(sweep_begin, sweep_end);
   CHECK_GE(sweep_begin, live_bitmap.heap_begin_);
 
@@ -186,7 +186,7 @@
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Visit fields of parent classes first.
   mirror::Class* super = klass->GetSuperClass();
-  if (super != NULL) {
+  if (super != nullptr) {
     WalkInstanceFields(visited, callback, obj, super, arg);
   }
   // Walk instance fields
@@ -233,7 +233,7 @@
     int32_t length = obj_array->GetLength();
     for (int32_t i = 0; i < length; i++) {
       mirror::Object* value = obj_array->Get(i);
-      if (value != NULL) {
+      if (value != nullptr) {
         WalkFieldsInOrder(visited, callback, value, arg);
       }
     }
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 850325a..edb08ef 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -34,7 +34,7 @@
   size_t heap_capacity = 16 * MB;
   std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
       ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
-  EXPECT_TRUE(space_bitmap.get() != NULL);
+  EXPECT_TRUE(space_bitmap.get() != nullptr);
 }
 
 class BitmapVerify {
@@ -62,7 +62,7 @@
 
   std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
       ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
-  EXPECT_TRUE(space_bitmap.get() != NULL);
+  EXPECT_TRUE(space_bitmap.get() != nullptr);
 
   // Set all the odd bits in the first BitsPerIntPtrT * 3 to one.
   for (size_t j = 0; j < kBitsPerIntPtrT * 3; ++j) {
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 515f124..85234dc 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -115,7 +115,7 @@
 void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
   lock_.AssertHeld(self);
   DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
-  FreePageRun* res = NULL;
+  FreePageRun* res = nullptr;
   const size_t req_byte_size = num_pages * kPageSize;
   // Find the lowest address free page run that's large enough.
   for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
@@ -157,8 +157,8 @@
   }
 
   // Failed to allocate pages. Grow the footprint, if possible.
-  if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
-    FreePageRun* last_free_page_run = NULL;
+  if (UNLIKELY(res == nullptr && capacity_ > footprint_)) {
+    FreePageRun* last_free_page_run = nullptr;
     size_t last_free_page_run_size;
     auto it = free_page_runs_.rbegin();
     if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
@@ -218,7 +218,7 @@
       DCHECK(it != free_page_runs_.rend());
       FreePageRun* fpr = *it;
       if (kIsDebugBuild && last_free_page_run_size > 0) {
-        DCHECK(last_free_page_run != NULL);
+        DCHECK(last_free_page_run != nullptr);
         DCHECK_EQ(last_free_page_run, fpr);
       }
       size_t fpr_byte_size = fpr->ByteSize(this);
@@ -249,7 +249,7 @@
       res = fpr;
     }
   }
-  if (LIKELY(res != NULL)) {
+  if (LIKELY(res != nullptr)) {
     // Update the page map.
     size_t page_map_idx = ToPageMapIndex(res);
     for (size_t i = 0; i < num_pages; i++) {
@@ -286,7 +286,7 @@
 
   // Fail.
   if (kTraceRosAlloc) {
-    LOG(INFO) << "RosAlloc::AllocPages() : NULL";
+    LOG(INFO) << "RosAlloc::AllocPages() : nullptr";
   }
   return nullptr;
 }
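
AllocPages() above walks an address-ordered set of free page runs and takes
the first one large enough, splitting off the remainder. A simplified sketch
of that first-fit policy, not RosAlloc itself:

  #include <cstddef>
  #include <set>

  struct Run {
    std::size_t begin;
    std::size_t pages;
    bool operator<(const Run& other) const { return begin < other.begin; }
  };

  // Returns the begin of the satisfied request, or (size_t)-1 on failure.
  std::size_t AllocFirstFit(std::set<Run>* free_runs, std::size_t num_pages) {
    for (auto it = free_runs->begin(); it != free_runs->end(); ++it) {
      if (it->pages >= num_pages) {
        Run run = *it;
        free_runs->erase(it);
        if (run.pages > num_pages) {
          // Return the tail of the run to the free set.
          free_runs->insert({run.begin + num_pages, run.pages - num_pages});
        }
        return run.begin;
      }
    }
    return static_cast<std::size_t>(-1);  // no run large enough
  }
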
@@ -468,7 +468,7 @@
   }
   if (UNLIKELY(r == nullptr)) {
     if (kTraceRosAlloc) {
-      LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
+      LOG(INFO) << "RosAlloc::AllocLargeObject() : nullptr";
     }
     return nullptr;
   }
@@ -824,7 +824,7 @@
     // already in the non-full run set (i.e., it was full) insert it
     // into the non-full run set.
     if (run != current_runs_[idx]) {
-      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
+      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
       auto pos = non_full_runs->find(run);
       if (pos == non_full_runs->end()) {
         DCHECK(run_was_full);
@@ -1275,7 +1275,7 @@
       // Check if the run should be moved to non_full_runs_ or
       // free_page_runs_.
       auto* non_full_runs = &non_full_runs_[idx];
-      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
+      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
       if (run->IsAllFree()) {
         // It has just become completely free. Free the pages of the
         // run.
@@ -1358,7 +1358,7 @@
   stream << "RosAlloc PageMap: " << std::endl;
   lock_.AssertHeld(Thread::Current());
   size_t end = page_map_size_;
-  FreePageRun* curr_fpr = NULL;
+  FreePageRun* curr_fpr = nullptr;
   size_t curr_fpr_size = 0;
   size_t remaining_curr_fpr_size = 0;
   size_t num_running_empty_pages = 0;
@@ -1373,7 +1373,7 @@
           // Encountered a fresh free page run.
           DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
           DCHECK(fpr->IsFree());
-          DCHECK(curr_fpr == NULL);
+          DCHECK(curr_fpr == nullptr);
           DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
           curr_fpr = fpr;
           curr_fpr_size = fpr->ByteSize(this);
@@ -1384,7 +1384,7 @@
                  << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
           if (remaining_curr_fpr_size == 0) {
             // Reset at the end of the current free page run.
-            curr_fpr = NULL;
+            curr_fpr = nullptr;
             curr_fpr_size = 0;
           }
           stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
@@ -1392,7 +1392,7 @@
         } else {
           // Still part of the current free page run.
           DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
-          DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
+          DCHECK(curr_fpr != nullptr && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
           DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
           DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
           remaining_curr_fpr_size -= kPageSize;
@@ -1400,7 +1400,7 @@
                  << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
           if (remaining_curr_fpr_size == 0) {
             // Reset at the end of the current free page run.
-            curr_fpr = NULL;
+            curr_fpr = nullptr;
             curr_fpr_size = 0;
           }
         }
@@ -1546,7 +1546,7 @@
 void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                           void* arg) {
   // Note: no need to use this to release pages as we already do so in FreePages().
-  if (handler == NULL) {
+  if (handler == nullptr) {
     return;
   }
   MutexLock mu(Thread::Current(), lock_);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 14eb80b..f0e8d14 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -578,7 +578,7 @@
         mark_stack_pos_(mark_stack_size) {
     // We may have to copy part of an existing mark stack when another mark stack overflows.
     if (mark_stack_size != 0) {
-      DCHECK(mark_stack != NULL);
+      DCHECK(mark_stack != nullptr);
       // TODO: Check performance?
       std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
     }
@@ -850,7 +850,7 @@
  public:
   RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                     accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
-      : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), bitmap_(bitmap), begin_(begin),
+      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin),
         end_(end) {
   }
 
@@ -1260,11 +1260,11 @@
     static const size_t kFifoSize = 4;
     BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
     for (;;) {
-      Object* obj = NULL;
+      Object* obj = nullptr;
       if (kUseMarkStackPrefetch) {
         while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
           Object* mark_stack_obj = mark_stack_->PopBack();
-          DCHECK(mark_stack_obj != NULL);
+          DCHECK(mark_stack_obj != nullptr);
           __builtin_prefetch(mark_stack_obj);
           prefetch_fifo.push_back(mark_stack_obj);
         }
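
The loop above pops upcoming mark-stack entries into a small FIFO and
prefetches them so each object is warm in cache by the time it is scanned. A
sketch of that idiom with assumed container types (the real code uses a
bounded power-of-two FIFO):

  #include <cstddef>
  #include <deque>
  #include <vector>

  template <typename T>
  T* PopWithPrefetch(std::vector<T*>* stack, std::deque<T*>* fifo,
                     std::size_t fifo_size) {
    while (!stack->empty() && fifo->size() < fifo_size) {
      T* next = stack->back();
      stack->pop_back();
      __builtin_prefetch(next);  // GCC/Clang builtin; warms the cache line
      fifo->push_back(next);
    }
    if (fifo->empty()) {
      return nullptr;  // mark stack drained
    }
    T* obj = fifo->front();
    fifo->pop_front();
    return obj;
  }
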
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index dbf01d8..82d02e7 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -750,7 +750,7 @@
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
   // All immune objects are assumed marked.
   if (from_space_->HasAddress(obj)) {
-    // Returns either the forwarding address or nullptr.
+    // Returns either the forwarding address or null.
     return GetForwardingAddressInFromSpace(obj);
   } else if (collect_from_space_only_ || immune_region_.ContainsObject(obj) ||
              to_space_->HasAddress(obj)) {
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index b770096..3e56205 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -293,7 +293,7 @@
           return nullptr;
         }
         // Try allocating a new thread-local buffer; if the allocation fails the space must be
-        // full so return nullptr.
+        // full so return null.
         if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
           return nullptr;
         }
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index beaf067..b80c4b6 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -412,7 +412,7 @@
   }
   // Allocate the card table.
   card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
-  CHECK(card_table_.get() != NULL) << "Failed to create card table";
+  CHECK(card_table_.get() != nullptr) << "Failed to create card table";
 
   if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
     rb_table_.reset(new accounting::ReadBarrierTable());
@@ -1052,7 +1052,7 @@
   if (!fail_ok) {
     LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
   }
-  return NULL;
+  return nullptr;
 }
 
 space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
@@ -1065,12 +1065,12 @@
   if (!fail_ok) {
     LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
   }
-  return NULL;
+  return nullptr;
 }
 
 space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
   space::Space* result = FindContinuousSpaceFromObject(obj, true);
-  if (result != NULL) {
+  if (result != nullptr) {
     return result;
   }
   return FindDiscontinuousSpaceFromObject(obj, fail_ok);
@@ -1082,7 +1082,7 @@
       return space->AsImageSpace();
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
@@ -2204,7 +2204,7 @@
   // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
   // the remaining available space.
   // Remove the old space before creating the zygote space since creating the zygote space sets
-  // the old alloc space's bitmaps to nullptr.
+  // the old alloc space's bitmaps to null.
   RemoveSpace(old_alloc_space);
   if (collector::SemiSpace::kUseRememberedSet) {
     // Sanity bound check.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 066b4c5..565687c 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -396,7 +396,7 @@
   void RecordFreeRevoke();
 
   // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
-  // The call is not needed if NULL is stored in the field.
+  // The call is not needed if null is stored in the field.
   ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
                                        const mirror::Object* /*new_value*/) {
     card_table_->MarkCard(dst);
@@ -991,7 +991,7 @@
   // programs it is "cleared" making it the same as capacity.
   size_t growth_limit_;
 
-  // When the number of bytes allocated exceeds the footprint TryAllocate returns NULL indicating
+  // When the number of bytes allocated exceeds the footprint, TryAllocate returns null, indicating
   // a GC should be triggered.
   size_t max_allowed_footprint_;
 
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 7be0704..4c93a4c 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -30,7 +30,7 @@
 }
 
 void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
-  DCHECK(ref != NULL);
+  DCHECK(ref != nullptr);
   MutexLock mu(self, *lock_);
   if (!ref->IsEnqueued()) {
     EnqueuePendingReference(ref);
@@ -43,7 +43,7 @@
 }
 
 void ReferenceQueue::EnqueuePendingReference(mirror::Reference* ref) {
-  DCHECK(ref != NULL);
+  DCHECK(ref != nullptr);
   if (IsEmpty()) {
     // 1-element cyclic queue, i.e.: Reference ref = ..; ref.pendingNext = ref;
     list_ = ref;
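
The comment above notes that an enqueued reference forms a one-element cyclic
queue: the empty queue is a null list head, and a singleton points at itself,
so "already enqueued" reduces to pendingNext != nullptr. A sketch of that
structure with illustrative names:

  struct Ref {
    Ref* pending_next = nullptr;
  };

  void EnqueuePending(Ref** list, Ref* ref) {
    if (*list == nullptr) {
      ref->pending_next = ref;  // 1-element cyclic queue
      *list = ref;
    } else {
      // Splice behind the head, keeping the cycle closed.
      ref->pending_next = (*list)->pending_next;
      (*list)->pending_next = ref;
    }
  }
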
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index c496a42..df43606 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -45,7 +45,7 @@
   static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
   static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);
 
-  // Allocate num_bytes, returns nullptr if the space is full.
+  // Allocate num_bytes, returns null if the space is full.
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
   // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
diff --git a/runtime/gc/space/dlmalloc_space-inl.h b/runtime/gc/space/dlmalloc_space-inl.h
index 9eace89..4fc4ada 100644
--- a/runtime/gc/space/dlmalloc_space-inl.h
+++ b/runtime/gc/space/dlmalloc_space-inl.h
@@ -35,7 +35,7 @@
     obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
                                    bytes_tl_bulk_allocated);
   }
-  if (LIKELY(obj != NULL)) {
+  if (LIKELY(obj != nullptr)) {
     // Zero freshly allocated memory, done while not holding the space's lock.
     memset(obj, 0, num_bytes);
   }
@@ -57,13 +57,13 @@
     size_t* usable_size,
     size_t* bytes_tl_bulk_allocated) {
   mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_malloc(mspace_, num_bytes));
-  if (LIKELY(result != NULL)) {
+  if (LIKELY(result != nullptr)) {
     if (kDebugSpaces) {
       CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
             << ") not in bounds of allocation space " << *this;
     }
     size_t allocation_size = AllocationSizeNonvirtual(result, usable_size);
-    DCHECK(bytes_allocated != NULL);
+    DCHECK(bytes_allocated != nullptr);
     *bytes_allocated = allocation_size;
     *bytes_tl_bulk_allocated = allocation_size;
   }
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 225861d..7b1a421f 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -39,7 +39,7 @@
     : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
                   starting_size, initial_size),
       mspace_(mspace) {
-  CHECK(mspace != NULL);
+  CHECK(mspace != nullptr);
 }
 
 DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
@@ -176,7 +176,7 @@
 }
 
 size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
-  DCHECK(ptrs != NULL);
+  DCHECK(ptrs != nullptr);
 
   // Don't need the lock to calculate the size of the freed pointers.
   size_t bytes_freed = 0;
@@ -232,7 +232,7 @@
                       void* arg) {
   MutexLock mu(Thread::Current(), lock_);
   mspace_inspect_all(mspace_, callback, arg);
-  callback(NULL, NULL, 0, arg);  // Indicate end of a space.
+  callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
 }
 
 size_t DlMallocSpace::GetFootprint() {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e28e8d7..f350038 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -666,7 +666,7 @@
   }
 
   std::unique_ptr<File> file(OS::OpenFileForReading(image_filename));
-  if (file.get() == NULL) {
+  if (file.get() == nullptr) {
     *error_msg = StringPrintf("Failed to open '%s'", image_filename);
     return nullptr;
   }
@@ -695,7 +695,7 @@
   std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress(
       image_header.GetImageBegin(), image_header.GetImageSize() + image_header.GetArtFieldsSize(),
       PROT_READ | PROT_WRITE, MAP_PRIVATE, file->Fd(), 0, false, image_filename, error_msg));
-  if (map.get() == NULL) {
+  if (map.get() == nullptr) {
     DCHECK(!error_msg->empty());
     return nullptr;
   }
@@ -786,7 +786,7 @@
                                     image_header.GetOatFileBegin(),
                                     !Runtime::Current()->IsAotCompiler(),
                                     nullptr, error_msg);
-  if (oat_file == NULL) {
+  if (oat_file == nullptr) {
     *error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
                               oat_filename.c_str(), GetName(), error_msg->c_str());
     return nullptr;
@@ -811,7 +811,7 @@
 }
 
 bool ImageSpace::ValidateOatFile(std::string* error_msg) const {
-  CHECK(oat_file_.get() != NULL);
+  CHECK(oat_file_.get() != nullptr);
   for (const OatFile::OatDexFile* oat_dex_file : oat_file_->GetOatDexFiles()) {
     const std::string& dex_file_location = oat_dex_file->GetDexFileLocation();
     uint32_t dex_file_location_checksum;
@@ -837,7 +837,7 @@
 }
 
 OatFile* ImageSpace::ReleaseOatFile() {
-  CHECK(oat_file_.get() != NULL);
+  CHECK(oat_file_.get() != nullptr);
   return oat_file_.release();
 }
 
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 9ae2af4..54dc7a6 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -52,7 +52,7 @@
                                            InstructionSet image_isa);
 
   // Reads the image header from the specified image location for the
-  // instruction set image_isa. Returns nullptr on failure, with
+  // instruction set image_isa. Returns null on failure, with
   // reason in error_msg.
   static ImageHeader* ReadImageHeader(const char* image_location,
                                       InstructionSet image_isa,
@@ -122,7 +122,7 @@
 
  private:
   // Tries to initialize an ImageSpace from the given image path,
-  // returning NULL on error.
+  // returning null on error.
   //
   // If validate_oat_file is false (for /system), do not verify that
   // image's OatFile is up-to-date relative to its DexFile
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 7353c83..4dfdaa5 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -124,9 +124,9 @@
   std::string error_msg;
   MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
                                          PROT_READ | PROT_WRITE, true, false, &error_msg);
-  if (UNLIKELY(mem_map == NULL)) {
+  if (UNLIKELY(mem_map == nullptr)) {
     LOG(WARNING) << "Large object allocation failed: " << error_msg;
-    return NULL;
+    return nullptr;
   }
   mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
   if (kIsDebugBuild) {
@@ -206,7 +206,7 @@
   for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
     MemMap* mem_map = it->second;
     callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
-    callback(NULL, NULL, 0, arg);
+    callback(nullptr, nullptr, 0, arg);
   }
 }
 
@@ -316,7 +316,7 @@
   std::string error_msg;
   MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                          PROT_READ | PROT_WRITE, true, false, &error_msg);
-  CHECK(mem_map != NULL) << "Failed to allocate large object space mem map: " << error_msg;
+  CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
   return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
 }
 
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 9195b06..b014217 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -75,13 +75,13 @@
     LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
         << PrettySize(*initial_size) << ") is larger than its capacity ("
         << PrettySize(*growth_limit) << ")";
-    return NULL;
+    return nullptr;
   }
   if (*growth_limit > *capacity) {
     LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity ("
         << PrettySize(*growth_limit) << ") is larger than the capacity ("
         << PrettySize(*capacity) << ")";
-    return NULL;
+    return nullptr;
   }
 
   // Page align growth limit and capacity which will be used to manage mmapped storage
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index bbf1bbb..5f3a1db 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -60,7 +60,7 @@
   // Allocate num_bytes without allowing the underlying space to grow.
   virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
-  // Return the storage space required by obj. If usable_size isn't nullptr then it is set to the
+  // Return the storage space required by obj. If usable_size isn't null then it is set to the
   // amount of the storage space that may be used by obj.
   virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
   virtual size_t Free(Thread* self, mirror::Object* ptr)
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index b88ce24..19109f0 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -40,7 +40,7 @@
   // space to confirm the request was granted.
   static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
 
-  // Allocate num_bytes, returns nullptr if the space is full.
+  // Allocate num_bytes, returns null if the space is full.
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
   // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index 9d582a3..25d4445 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -73,18 +73,18 @@
       rosalloc_->Alloc<kThreadSafe>(self, num_bytes, &rosalloc_bytes_allocated,
                                     &rosalloc_usable_size,
                                     &rosalloc_bytes_tl_bulk_allocated));
-  if (LIKELY(result != NULL)) {
+  if (LIKELY(result != nullptr)) {
     if (kDebugSpaces) {
       CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
             << ") not in bounds of allocation space " << *this;
     }
-    DCHECK(bytes_allocated != NULL);
+    DCHECK(bytes_allocated != nullptr);
     *bytes_allocated = rosalloc_bytes_allocated;
     DCHECK_EQ(rosalloc_usable_size, rosalloc_->UsableSize(result));
     if (usable_size != nullptr) {
       *usable_size = rosalloc_usable_size;
     }
-    DCHECK(bytes_tl_bulk_allocated != NULL);
+    DCHECK(bytes_tl_bulk_allocated != nullptr);
     *bytes_tl_bulk_allocated = rosalloc_bytes_tl_bulk_allocated;
   }
   return result;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index eb1d5f4..2c7d93e 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -64,9 +64,9 @@
 
   allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
                                                  capacity, low_memory_mode, running_on_valgrind);
-  if (rosalloc == NULL) {
+  if (rosalloc == nullptr) {
     LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
-    return NULL;
+    return nullptr;
   }
 
   // Protect memory beyond the starting size. MoreCore will add r/w permissions when necessary.
@@ -113,10 +113,10 @@
   size_t starting_size = Heap::kDefaultStartingSize;
   MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
                                  requested_begin);
-  if (mem_map == NULL) {
+  if (mem_map == nullptr) {
     LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
                << PrettySize(capacity);
-    return NULL;
+    return nullptr;
   }
 
   RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
@@ -145,7 +145,7 @@
           art::gc::allocator::RosAlloc::kPageReleaseModeAll :
           art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd,
       running_on_valgrind);
-  if (rosalloc != NULL) {
+  if (rosalloc != nullptr) {
     rosalloc->SetFootprintLimit(initial_size);
   } else {
     PLOG(ERROR) << "RosAlloc::Create failed";
@@ -170,7 +170,7 @@
     rosalloc_->SetFootprintLimit(footprint);
   }
   // Note RosAlloc zeroes memory internally.
-  // Return the new allocation or NULL.
+  // Return the new allocation or null.
   CHECK(!kDebugSpaces || result == nullptr || Contains(result));
   return result;
 }
@@ -192,7 +192,7 @@
 
 size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
   if (kDebugSpaces) {
-    CHECK(ptr != NULL);
+    CHECK(ptr != nullptr);
     CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
   }
   if (kRecentFreeCount > 0) {
@@ -309,7 +309,7 @@
     MutexLock mu2(self, *Locks::thread_list_lock_);
     rosalloc_->InspectAll(callback, arg);
     if (do_null_callback_at_end) {
-      callback(NULL, NULL, 0, arg);  // Indicate end of a space.
+      callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
     }
   }
   tl->ResumeAll();
@@ -324,7 +324,7 @@
     // from SignalCatcher::HandleSigQuit().
     rosalloc_->InspectAll(callback, arg);
     if (do_null_callback_at_end) {
-      callback(NULL, NULL, 0, arg);  // Indicate end of a space.
+      callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
     }
   } else if (Locks::mutator_lock_->IsSharedHeld(self)) {
     // The mutators are not suspended yet and we have a shared access
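Both InspectAll call sites rely on the same callback protocol: one invocation per allocated chunk, then a final invocation with null pointers and zero size to mark the end of a space. A sketch of that protocol with invented types (the real walk happens inside rosalloc):

    #include <cstddef>
    #include <vector>

    using WalkFn = void (*)(void* start, void* end, size_t used_bytes, void* arg);

    struct Chunk { void* start; void* end; size_t used; };

    void InspectAllSketch(const std::vector<Chunk>& chunks, WalkFn callback,
                          void* arg, bool null_callback_at_end) {
      for (const Chunk& c : chunks) {
        callback(c.start, c.end, c.used, arg);
      }
      if (null_callback_at_end) {
        callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
      }
    }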
diff --git a/runtime/gc_map.h b/runtime/gc_map.h
index ffe54c4..b4ccdd6 100644
--- a/runtime/gc_map.h
+++ b/runtime/gc_map.h
@@ -28,7 +28,7 @@
 class NativePcOffsetToReferenceMap {
  public:
   explicit NativePcOffsetToReferenceMap(const uint8_t* data) : data_(data) {
-    CHECK(data_ != NULL);
+    CHECK(data_ != nullptr);
   }
 
   // The number of entries in the table.
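NativePcOffsetToReferenceMap is a thin view over raw bytes whose constructor only pins down the invariant that the backing data exists. A sketch of the same view-over-bytes shape; the header layout below is invented, not the real GC map encoding:

    #include <cassert>
    #include <cstdint>

    class ByteTableView {
     public:
      explicit ByteTableView(const uint8_t* data) : data_(data) {
        assert(data_ != nullptr);  // CHECK in ART; assert keeps the sketch portable.
      }
      // Number of entries, assumed little-endian in the first four bytes.
      uint32_t NumEntries() const {
        return static_cast<uint32_t>(data_[0]) |
               (static_cast<uint32_t>(data_[1]) << 8) |
               (static_cast<uint32_t>(data_[2]) << 16) |
               (static_cast<uint32_t>(data_[3]) << 24);
      }
     private:
      const uint8_t* const data_;
    };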
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 23af25d..fb7ff54 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -538,7 +538,7 @@
 
       // STRING format:
       // ID:  ID for this string
-      // U1*: UTF8 characters for string (NOT NULL terminated)
+      // U1*: UTF8 characters for string (NOT null terminated)
       //      (the record format encodes the length)
       __ AddU4(id);
       __ AddUtf8String(string.c_str());
@@ -931,7 +931,7 @@
 
   mirror::Class* c = obj->GetClass();
   if (c == nullptr) {
-    // This object will bother HprofReader, because it has a NULL
+    // This object will bother HprofReader, because it has a null
     // class, so just don't dump it. It could be
     // gDvm.unlinkedJavaLangClass or it could be an object just
     // allocated which hasn't been initialized yet.
@@ -1057,7 +1057,7 @@
     __ AddU4(length);
     __ AddClassId(LookupClassId(klass));
 
-    // Dump the elements, which are always objects or NULL.
+    // Dump the elements, which are always objects or null.
     __ AddIdList(obj->AsObjectArray<mirror::Object>());
   } else {
     size_t size;
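The STRING record comment is precise about an easy-to-miss detail: the UTF-8 bytes carry no trailing NUL because the record header already encodes the length. A sketch of emitting such a record body (names invented, not hprof.cc's writer):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Append a big-endian U4 ID followed by the raw UTF-8 bytes of s,
    // deliberately without a terminating NUL; the length lives in the header.
    void AppendStringRecord(std::vector<uint8_t>* out, uint32_t id, const char* s) {
      for (int shift = 24; shift >= 0; shift -= 8) {
        out->push_back(static_cast<uint8_t>(id >> shift));
      }
      out->insert(out->end(), s, s + strlen(s));
    }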
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index e571a0e..639be51 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -31,7 +31,7 @@
 // Returns "false" if something looks bad.
 inline bool IndirectReferenceTable::GetChecked(IndirectRef iref) const {
   if (UNLIKELY(iref == nullptr)) {
-    LOG(WARNING) << "Attempt to look up NULL " << kind_;
+    LOG(WARNING) << "Attempt to look up nullptr " << kind_;
     return false;
   }
   if (UNLIKELY(GetIndirectRefKind(iref) == kHandleScopeOrInvalid)) {
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index d6f9682..e2b9559 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -103,9 +103,9 @@
   prevState.all = cookie;
   size_t topIndex = segment_state_.parts.topIndex;
 
-  CHECK(obj != NULL);
+  CHECK(obj != nullptr);
   VerifyObject(obj);
-  DCHECK(table_ != NULL);
+  DCHECK(table_ != nullptr);
   DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
 
   if (topIndex == max_entries_) {
@@ -144,7 +144,7 @@
               << " holes=" << segment_state_.parts.numHoles;
   }
 
-  DCHECK(result != NULL);
+  DCHECK(result != nullptr);
   return result;
 }
 
@@ -172,7 +172,7 @@
   int topIndex = segment_state_.parts.topIndex;
   int bottomIndex = prevState.parts.topIndex;
 
-  DCHECK(table_ != NULL);
+  DCHECK(table_ != nullptr);
   DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
 
   if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid &&
@@ -227,9 +227,8 @@
       }
     }
   } else {
-    // Not the top-most entry.  This creates a hole.  We NULL out the
-    // entry to prevent somebody from deleting it twice and screwing up
-    // the hole count.
+    // Not the top-most entry.  This creates a hole.  We null out the entry to prevent somebody
+    // from deleting it twice and screwing up the hole count.
     if (table_[idx].GetReference()->IsNull()) {
       LOG(INFO) << "--- WEIRD: removing null entry " << idx;
       return false;
@@ -270,9 +269,7 @@
   ReferenceTable::Table entries;
   for (size_t i = 0; i < Capacity(); ++i) {
     mirror::Object* obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
-    if (UNLIKELY(obj == nullptr)) {
-      // Remove NULLs.
-    } else {
+    if (obj != nullptr) {
       obj = table_[i].GetReference()->Read();
       entries.push_back(GcRoot<mirror::Object>(obj));
     }
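The hole bookkeeping reworded above is the core of the table: deleting a non-top entry nulls the slot and counts a hole, so a double delete is detectable and the hole count stays honest. A simplified sketch of that invariant (not the ART class):

    #include <cstddef>
    #include <vector>

    struct SimpleRefTable {
      std::vector<void*> slots;
      size_t holes = 0;

      bool Remove(size_t idx) {
        if (idx + 1 == slots.size()) {
          slots.pop_back();      // Top entry: just shrink the table.
          return true;
        }
        if (slots[idx] == nullptr) {
          return false;          // Already removed; a second delete is an error.
        }
        slots[idx] = nullptr;    // Not the top-most entry: leave a hole.
        ++holes;
        return true;
      }
    };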
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 0072184..a0e53af 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -268,9 +268,9 @@
   bool IsValid() const;
 
   /*
-   * Add a new entry.  "obj" must be a valid non-NULL object reference.
+   * Add a new entry.  "obj" must be a valid non-null object reference.
    *
-   * Returns NULL if the table is full (max entries reached, or alloc
+   * Returns null if the table is full (max entries reached, or alloc
    * failed during expansion).
    */
   IndirectRef Add(uint32_t cookie, mirror::Object* obj)
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index fe1b8f0..c20002b 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -216,7 +216,7 @@
   ASSERT_EQ(0U, irt.Capacity()) << "temporary deletion not empty";
   CheckDump(&irt, 0, 0);
 
-  // nullptr isn't a valid iref.
+  // null isn't a valid iref.
   ASSERT_TRUE(irt.Get(nullptr) == nullptr);
 
   // Stale lookup.
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 51600f7..e6c333d 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -177,14 +177,14 @@
 
     virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
       mirror::ArtMethod* m = GetMethod();
-      if (m == NULL) {
+      if (m == nullptr) {
         if (kVerboseInstrumentation) {
           LOG(INFO) << "  Skipping upcall. Frame " << GetFrameId();
         }
         last_return_pc_ = 0;
         return true;  // Ignore upcalls.
       }
-      if (GetCurrentQuickFrame() == NULL) {
+      if (GetCurrentQuickFrame() == nullptr) {
         bool interpreter_frame = true;
         InstrumentationStackFrame instrumentation_frame(GetThisObject(), m, 0, GetFrameId(),
                                                         interpreter_frame);
@@ -309,7 +309,7 @@
   struct RestoreStackVisitor : public StackVisitor {
     RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
                         Instrumentation* instrumentation)
-        : StackVisitor(thread_in, NULL), thread_(thread_in),
+        : StackVisitor(thread_in, nullptr), thread_(thread_in),
           instrumentation_exit_pc_(instrumentation_exit_pc),
           instrumentation_(instrumentation),
           instrumentation_stack_(thread_in->GetInstrumentationStack()),
@@ -320,14 +320,14 @@
         return false;  // Stop.
       }
       mirror::ArtMethod* m = GetMethod();
-      if (GetCurrentQuickFrame() == NULL) {
+      if (GetCurrentQuickFrame() == nullptr) {
         if (kVerboseInstrumentation) {
           LOG(INFO) << "  Ignoring a shadow frame. Frame " << GetFrameId()
               << " Method=" << PrettyMethod(m);
         }
         return true;  // Ignore shadow frames.
       }
-      if (m == NULL) {
+      if (m == nullptr) {
         if (kVerboseInstrumentation) {
           LOG(INFO) << "  Skipping upcall. Frame " << GetFrameId();
         }
@@ -645,7 +645,7 @@
   Runtime* runtime = Runtime::Current();
   if (runtime->IsStarted()) {
     MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
-    runtime->GetThreadList()->ForEach(ResetQuickAllocEntryPointsForThread, NULL);
+    runtime->GetThreadList()->ForEach(ResetQuickAllocEntryPointsForThread, nullptr);
   }
 }
 
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 4c5fc81..a85d10f 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -194,7 +194,7 @@
       uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
       // GetResolvedString() contains a RB.
       mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
-      if (image_string != NULL) {
+      if (image_string != nullptr) {
         return image_string;
       }
     }
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 200a764..1e5d3c2 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -150,7 +150,7 @@
     UnorderedSet post_zygote_table_;
   };
 
-  // Insert if non null, otherwise return nullptr.
+  // Insert if non-null, otherwise return null.
   mirror::String* Insert(mirror::String* s, bool is_strong)
       LOCKS_EXCLUDED(Locks::intern_table_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
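Insert's null-propagating contract (pass a null string through, intern anything else) keeps call sites branch-free. A sketch of the shape, with std::unordered_set standing in for the strong and weak intern tables:

    #include <string>
    #include <unordered_set>

    const std::string* InsertSketch(std::unordered_set<std::string>* table,
                                    const std::string* s) {
      if (s == nullptr) {
        return nullptr;  // Insert if non-null, otherwise return null.
      }
      return &*table->insert(*s).first;  // Pointer to the canonical copy.
    }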
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index d462e14..194d0af 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -38,8 +38,8 @@
   EXPECT_TRUE(foo_1->Equals("foo"));
   EXPECT_TRUE(foo_2->Equals("foo"));
   EXPECT_TRUE(foo_3->Equals("foo"));
-  EXPECT_TRUE(foo_1.Get() != NULL);
-  EXPECT_TRUE(foo_2.Get() != NULL);
+  EXPECT_TRUE(foo_1.Get() != nullptr);
+  EXPECT_TRUE(foo_2.Get() != nullptr);
   EXPECT_EQ(foo_1.Get(), foo_2.Get());
   EXPECT_NE(foo_1.Get(), bar.Get());
   EXPECT_NE(foo_2.Get(), bar.Get());
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 4801124..423b952 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -78,7 +78,8 @@
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
-                                   soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+                                   soa.AddLocalReference<jobject>(
+                                       reinterpret_cast<Object*>(args[0])));
       jobject jresult;
       {
         ScopedThreadStateChange tsc(self, kNative);
@@ -99,12 +100,14 @@
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
-                                   soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+                                   soa.AddLocalReference<jobject>(
+                                       reinterpret_cast<Object*>(args[0])));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
     } else if (shorty == "SIZ") {
       typedef jshort (fntype)(JNIEnv*, jclass, jint, jboolean);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetEntryPointFromJni()));
+      fntype* const fn =
+          reinterpret_cast<fntype*>(const_cast<void*>(method->GetEntryPointFromJni()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
@@ -122,9 +125,11 @@
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
-                                   soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+                                   soa.AddLocalReference<jobject>(
+                                       reinterpret_cast<Object*>(args[0])));
       ScopedLocalRef<jobject> arg1(soa.Env(),
-                                   soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[1])));
+                                   soa.AddLocalReference<jobject>(
+                                       reinterpret_cast<Object*>(args[1])));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
     } else if (shorty == "ZILL") {
@@ -133,9 +138,11 @@
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg1(soa.Env(),
-                                   soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[1])));
+                                   soa.AddLocalReference<jobject>(
+                                       reinterpret_cast<Object*>(args[1])));
       ScopedLocalRef<jobject> arg2(soa.Env(),
-                                   soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[2])));
+                                   soa.AddLocalReference<jobject>(
+                                       reinterpret_cast<Object*>(args[2])));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
     } else if (shorty == "VILII") {
@@ -144,7 +151,8 @@
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg1(soa.Env(),
-                                   soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[1])));
+                                   soa.AddLocalReference<jobject>(
+                                       reinterpret_cast<Object*>(args[1])));
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
     } else if (shorty == "VLILII") {
@@ -153,9 +161,11 @@
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
-                                   soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+                                   soa.AddLocalReference<jobject>(
+                                       reinterpret_cast<Object*>(args[0])));
       ScopedLocalRef<jobject> arg2(soa.Env(),
-                                   soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[2])));
+                                   soa.AddLocalReference<jobject>(
+                                       reinterpret_cast<Object*>(args[2])));
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), klass.get(), arg0.get(), args[1], arg2.get(), args[3], args[4]);
     } else {
@@ -187,7 +197,8 @@
       ScopedLocalRef<jobject> rcvr(soa.Env(),
                                    soa.AddLocalReference<jobject>(receiver));
       ScopedLocalRef<jobject> arg0(soa.Env(),
-                                   soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+                                   soa.AddLocalReference<jobject>(
+                                       reinterpret_cast<Object*>(args[0])));
       jobject jresult;
       {
         ScopedThreadStateChange tsc(self, kNative);
@@ -302,7 +313,7 @@
   const DexFile::CodeItem* code_item = method->GetCodeItem();
   uint16_t num_regs;
   uint16_t num_ins;
-  if (code_item != NULL) {
+  if (code_item != nullptr) {
     num_regs =  code_item->registers_size_;
     num_ins = code_item->ins_size_;
   } else if (method->IsAbstract()) {
@@ -325,7 +336,7 @@
 
   size_t cur_reg = num_regs - num_ins;
   if (!method->IsStatic()) {
-    CHECK(receiver != NULL);
+    CHECK(receiver != nullptr);
     shadow_frame->SetVRegReference(cur_reg, receiver);
     ++cur_reg;
   }
@@ -365,7 +376,7 @@
   }
   if (LIKELY(!method->IsNative())) {
     JValue r = Execute(self, code_item, *shadow_frame, JValue());
-    if (result != NULL) {
+    if (result != nullptr) {
       *result = r;
     }
   } else {
@@ -386,8 +397,9 @@
 void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JValue* ret_val)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   JValue value;
-  value.SetJ(ret_val->GetJ());  // Set value to last known result in case the shadow frame chain is empty.
-  while (shadow_frame != NULL) {
+  // Set value to last known result in case the shadow frame chain is empty.
+  value.SetJ(ret_val->GetJ());
+  while (shadow_frame != nullptr) {
     self->SetTopOfShadowStack(shadow_frame);
     const DexFile::CodeItem* code_item = shadow_frame->GetMethod()->GetCodeItem();
     const uint32_t dex_pc = shadow_frame->GetDexPC();
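The lines rewrapped above come from the interpreter's shorty-based JNI dispatch: the method's shorty string (return type first, then argument types) selects a concrete function-pointer type for the cast of the raw JNI entry point. A toy sketch of the technique, with invented signatures in place of the JNI ones:

    #include <string>

    int DispatchByShorty(const std::string& shorty, void* entry_point,
                         const int* args) {
      if (shorty == "II") {            // int f(int)
        using Fn = int (*)(int);
        return reinterpret_cast<Fn>(entry_point)(args[0]);
      } else if (shorty == "III") {    // int f(int, int)
        using Fn = int (*)(int, int);
        return reinterpret_cast<Fn>(entry_point)(args[0], args[1]);
      }
      return 0;  // Unhandled shorty; the real code falls back or aborts.
    }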
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 3ae611b..4765ebc 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -489,7 +489,7 @@
   const DexFile::CodeItem* code_item = called_method->GetCodeItem();
   const uint16_t num_ins = (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
   uint16_t num_regs;
-  if (LIKELY(code_item != NULL)) {
+  if (LIKELY(code_item != nullptr)) {
     num_regs = code_item->registers_size_;
     DCHECK_EQ(num_ins, code_item->ins_size_);
   } else {
@@ -543,11 +543,11 @@
       switch (shorty[shorty_pos + 1]) {
         case 'L': {
           Object* o = shadow_frame.GetVRegReference(src_reg);
-          if (do_assignability_check && o != NULL) {
+          if (do_assignability_check && o != nullptr) {
             Class* arg_type =
                 new_shadow_frame->GetMethod()->GetClassFromTypeIndex(
                     params->GetTypeItem(shorty_pos).type_idx_, true);
-            if (arg_type == NULL) {
+            if (arg_type == nullptr) {
               CHECK(self->IsExceptionPending());
               return false;
             }
@@ -651,7 +651,7 @@
   uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
   Class* arrayClass = ResolveVerifyAndClinit(type_idx, shadow_frame.GetMethod(),
                                              self, false, do_access_check);
-  if (UNLIKELY(arrayClass == NULL)) {
+  if (UNLIKELY(arrayClass == nullptr)) {
     DCHECK(self->IsExceptionPending());
     return false;
   }
@@ -671,7 +671,7 @@
   Object* newArray = Array::Alloc<true>(self, arrayClass, length,
                                         arrayClass->GetComponentSizeShift(),
                                         Runtime::Current()->GetHeap()->GetCurrentAllocator());
-  if (UNLIKELY(newArray == NULL)) {
+  if (UNLIKELY(newArray == nullptr)) {
     DCHECK(self->IsExceptionPending());
     return false;
   }
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 0e0d56a..dbedc16 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -367,9 +367,9 @@
       uint32_t raw_value = shadow_frame.GetVReg(i);
       Object* ref_value = shadow_frame.GetVRegReference(i);
       oss << StringPrintf(" vreg%u=0x%08X", i, raw_value);
-      if (ref_value != NULL) {
+      if (ref_value != nullptr) {
         if (ref_value->GetClass()->IsStringClass() &&
-            ref_value->AsString()->GetCharArray() != NULL) {
+            ref_value->AsString()->GetCharArray() != nullptr) {
           oss << "/java.lang.String \"" << ref_value->AsString()->ToModifiedUtf8() << "\"";
         } else {
           oss << "/" << PrettyTypeOf(ref_value);
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index cead26c..dc0b687 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -55,7 +55,8 @@
   } while (false)
 
 #define UPDATE_HANDLER_TABLE() \
-  currentHandlersTable = handlersTable[Runtime::Current()->GetInstrumentation()->GetInterpreterHandlerTable()]
+  currentHandlersTable = handlersTable[ \
+      Runtime::Current()->GetInstrumentation()->GetInterpreterHandlerTable()]
 
 #define BACKWARD_BRANCH_INSTRUMENTATION(offset) \
   do { \
@@ -328,10 +329,10 @@
     self->AllowThreadSuspension();
     const uint8_t vreg_index = inst->VRegA_11x(inst_data);
     Object* obj_result = shadow_frame.GetVRegReference(vreg_index);
-    if (do_assignability_check && obj_result != NULL) {
+    if (do_assignability_check && obj_result != nullptr) {
       Class* return_type = shadow_frame.GetMethod()->GetReturnType();
       obj_result = shadow_frame.GetVRegReference(vreg_index);
-      if (return_type == NULL) {
+      if (return_type == nullptr) {
         // Return the pending exception.
         HANDLE_PENDING_EXCEPTION();
       }
@@ -364,7 +365,7 @@
     int32_t val = inst->VRegB_11n(inst_data);
     shadow_frame.SetVReg(dst, val);
     if (val == 0) {
-      shadow_frame.SetVRegReference(dst, NULL);
+      shadow_frame.SetVRegReference(dst, nullptr);
     }
     ADVANCE(1);
   }
@@ -375,7 +376,7 @@
     int32_t val = inst->VRegB_21s();
     shadow_frame.SetVReg(dst, val);
     if (val == 0) {
-      shadow_frame.SetVRegReference(dst, NULL);
+      shadow_frame.SetVRegReference(dst, nullptr);
     }
     ADVANCE(2);
   }
@@ -386,7 +387,7 @@
     int32_t val = inst->VRegB_31i();
     shadow_frame.SetVReg(dst, val);
     if (val == 0) {
-      shadow_frame.SetVRegReference(dst, NULL);
+      shadow_frame.SetVRegReference(dst, nullptr);
     }
     ADVANCE(3);
   }
@@ -397,7 +398,7 @@
     int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
     shadow_frame.SetVReg(dst, val);
     if (val == 0) {
-      shadow_frame.SetVRegReference(dst, NULL);
+      shadow_frame.SetVRegReference(dst, nullptr);
     }
     ADVANCE(2);
   }
@@ -426,7 +427,7 @@
 
   HANDLE_INSTRUCTION_START(CONST_STRING) {
     String* s = ResolveString(self, shadow_frame, inst->VRegB_21c());
-    if (UNLIKELY(s == NULL)) {
+    if (UNLIKELY(s == nullptr)) {
       HANDLE_PENDING_EXCEPTION();
     } else {
       shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
@@ -437,7 +438,7 @@
 
   HANDLE_INSTRUCTION_START(CONST_STRING_JUMBO) {
     String* s = ResolveString(self, shadow_frame, inst->VRegB_31c());
-    if (UNLIKELY(s == NULL)) {
+    if (UNLIKELY(s == nullptr)) {
       HANDLE_PENDING_EXCEPTION();
     } else {
       shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
@@ -449,7 +450,7 @@
   HANDLE_INSTRUCTION_START(CONST_CLASS) {
     Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
                                       self, false, do_access_check);
-    if (UNLIKELY(c == NULL)) {
+    if (UNLIKELY(c == nullptr)) {
       HANDLE_PENDING_EXCEPTION();
     } else {
       shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
@@ -460,7 +461,7 @@
 
   HANDLE_INSTRUCTION_START(MONITOR_ENTER) {
     Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
-    if (UNLIKELY(obj == NULL)) {
+    if (UNLIKELY(obj == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -472,7 +473,7 @@
 
   HANDLE_INSTRUCTION_START(MONITOR_EXIT) {
     Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
-    if (UNLIKELY(obj == NULL)) {
+    if (UNLIKELY(obj == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -485,11 +486,11 @@
   HANDLE_INSTRUCTION_START(CHECK_CAST) {
     Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
                                       self, false, do_access_check);
-    if (UNLIKELY(c == NULL)) {
+    if (UNLIKELY(c == nullptr)) {
       HANDLE_PENDING_EXCEPTION();
     } else {
       Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
-      if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
+      if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
         ThrowClassCastException(c, obj->GetClass());
         HANDLE_PENDING_EXCEPTION();
       } else {
@@ -502,11 +503,11 @@
   HANDLE_INSTRUCTION_START(INSTANCE_OF) {
     Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
                                       self, false, do_access_check);
-    if (UNLIKELY(c == NULL)) {
+    if (UNLIKELY(c == nullptr)) {
       HANDLE_PENDING_EXCEPTION();
     } else {
       Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
-      shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
+      shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
       ADVANCE(2);
     }
   }
@@ -514,7 +515,7 @@
 
   HANDLE_INSTRUCTION_START(ARRAY_LENGTH) {
     Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
-    if (UNLIKELY(array == NULL)) {
+    if (UNLIKELY(array == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -529,7 +530,7 @@
     Object* obj = AllocObjectFromCode<do_access_check, true>(
         inst->VRegB_21c(), shadow_frame.GetMethod(), self,
         runtime->GetHeap()->GetCurrentAllocator());
-    if (UNLIKELY(obj == NULL)) {
+    if (UNLIKELY(obj == nullptr)) {
       HANDLE_PENDING_EXCEPTION();
     } else {
       obj->GetClass()->AssertInitializedOrInitializingInThread(self);
@@ -551,7 +552,7 @@
     Object* obj = AllocArrayFromCode<do_access_check, true>(
         inst->VRegC_22c(), length, shadow_frame.GetMethod(), self,
         Runtime::Current()->GetHeap()->GetCurrentAllocator());
-    if (UNLIKELY(obj == NULL)) {
+    if (UNLIKELY(obj == nullptr)) {
       HANDLE_PENDING_EXCEPTION();
     } else {
       shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
@@ -591,7 +592,7 @@
 
   HANDLE_INSTRUCTION_START(THROW) {
     Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
-    if (UNLIKELY(exception == NULL)) {
+    if (UNLIKELY(exception == nullptr)) {
       ThrowNullPointerException("throw with null exception");
     } else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
       // This should never happen.
@@ -778,7 +779,8 @@
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IF_NE) {
-    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) !=
+        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -795,7 +797,8 @@
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IF_LT) {
-    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <
+        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -812,7 +815,8 @@
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IF_GE) {
-    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >=
+        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -829,7 +833,8 @@
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IF_GT) {
-    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >
+        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -846,7 +851,8 @@
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IF_LE) {
-    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <=
+        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -966,7 +972,7 @@
 
   HANDLE_INSTRUCTION_START(AGET_BOOLEAN) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == NULL)) {
+    if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -984,7 +990,7 @@
 
   HANDLE_INSTRUCTION_START(AGET_BYTE) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == NULL)) {
+    if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -1002,7 +1008,7 @@
 
   HANDLE_INSTRUCTION_START(AGET_CHAR) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == NULL)) {
+    if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -1020,7 +1026,7 @@
 
   HANDLE_INSTRUCTION_START(AGET_SHORT) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == NULL)) {
+    if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -1038,7 +1044,7 @@
 
   HANDLE_INSTRUCTION_START(AGET) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == NULL)) {
+    if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -1056,7 +1062,7 @@
 
   HANDLE_INSTRUCTION_START(AGET_WIDE)  {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == NULL)) {
+    if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -1074,7 +1080,7 @@
 
   HANDLE_INSTRUCTION_START(AGET_OBJECT) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == NULL)) {
+    if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -1092,7 +1098,7 @@
 
   HANDLE_INSTRUCTION_START(APUT_BOOLEAN) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == NULL)) {
+    if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -1111,7 +1117,7 @@
 
   HANDLE_INSTRUCTION_START(APUT_BYTE) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == NULL)) {
+    if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -1130,7 +1136,7 @@
 
   HANDLE_INSTRUCTION_START(APUT_CHAR) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == NULL)) {
+    if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -1149,7 +1155,7 @@
 
   HANDLE_INSTRUCTION_START(APUT_SHORT) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == NULL)) {
+    if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -1168,7 +1174,7 @@
 
   HANDLE_INSTRUCTION_START(APUT) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == NULL)) {
+    if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -1187,7 +1193,7 @@
 
   HANDLE_INSTRUCTION_START(APUT_WIDE) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == NULL)) {
+    if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -1206,7 +1212,7 @@
 
   HANDLE_INSTRUCTION_START(APUT_OBJECT) {
     Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == NULL)) {
+    if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
       HANDLE_PENDING_EXCEPTION();
     } else {
@@ -1224,43 +1230,50 @@
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IGET_BOOLEAN) {
-    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+        self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IGET_BYTE) {
-    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(
+        self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IGET_CHAR) {
-    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(
+        self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IGET_SHORT) {
-    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(
+        self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IGET) {
-    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(
+        self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IGET_WIDE) {
-    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(
+        self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IGET_OBJECT) {
-    bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(
+        self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
@@ -1308,314 +1321,366 @@
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(SGET_BOOLEAN) {
-    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+        self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(SGET_BYTE) {
-    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(
+        self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(SGET_CHAR) {
-    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(
+        self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(SGET_SHORT) {
-    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(
+        self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(SGET) {
-    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(
+        self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(SGET_WIDE) {
-    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(
+        self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(SGET_OBJECT) {
-    bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(
+        self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IPUT_BOOLEAN) {
-    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IPUT_BYTE) {
-    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IPUT_CHAR) {
-    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IPUT_SHORT) {
-    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IPUT) {
-    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IPUT_WIDE) {
-    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IPUT_OBJECT) {
-    bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IPUT_QUICK) {
-    bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(shadow_frame, inst, inst_data);
+    bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(
+        shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IPUT_BOOLEAN_QUICK) {
-    bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(shadow_frame, inst, inst_data);
+    bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(
+        shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IPUT_BYTE_QUICK) {
-    bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(shadow_frame, inst, inst_data);
+    bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(
+        shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IPUT_CHAR_QUICK) {
-    bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(shadow_frame, inst, inst_data);
+    bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(
+        shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IPUT_SHORT_QUICK) {
-    bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(shadow_frame, inst, inst_data);
+    bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(
+        shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IPUT_WIDE_QUICK) {
-    bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(shadow_frame, inst, inst_data);
+    bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(
+        shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(IPUT_OBJECT_QUICK) {
-    bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(shadow_frame, inst, inst_data);
+    bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(
+        shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(SPUT_BOOLEAN) {
-    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(SPUT_BYTE) {
-    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(SPUT_CHAR) {
-    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(SPUT_SHORT) {
-    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(SPUT) {
-    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(SPUT_WIDE) {
-    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(SPUT_OBJECT) {
-    bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+    bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check,
+        transaction_active>(self, shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL) {
-    bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+    bool success = DoInvoke<kVirtual, false, do_access_check>(
+        self, shadow_frame, inst, inst_data, &result_register);
     UPDATE_HANDLER_TABLE();
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_RANGE) {
-    bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+    bool success = DoInvoke<kVirtual, true, do_access_check>(
+        self, shadow_frame, inst, inst_data, &result_register);
     UPDATE_HANDLER_TABLE();
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INVOKE_SUPER) {
-    bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+    bool success = DoInvoke<kSuper, false, do_access_check>(
+        self, shadow_frame, inst, inst_data, &result_register);
     UPDATE_HANDLER_TABLE();
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INVOKE_SUPER_RANGE) {
-    bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+    bool success = DoInvoke<kSuper, true, do_access_check>(
+        self, shadow_frame, inst, inst_data, &result_register);
     UPDATE_HANDLER_TABLE();
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INVOKE_DIRECT) {
-    bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+    bool success = DoInvoke<kDirect, false, do_access_check>(
+        self, shadow_frame, inst, inst_data, &result_register);
     UPDATE_HANDLER_TABLE();
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INVOKE_DIRECT_RANGE) {
-    bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+    bool success = DoInvoke<kDirect, true, do_access_check>(
+        self, shadow_frame, inst, inst_data, &result_register);
     UPDATE_HANDLER_TABLE();
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INVOKE_INTERFACE) {
-    bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+    bool success = DoInvoke<kInterface, false, do_access_check>(
+        self, shadow_frame, inst, inst_data, &result_register);
     UPDATE_HANDLER_TABLE();
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INVOKE_INTERFACE_RANGE) {
-    bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+    bool success = DoInvoke<kInterface, true, do_access_check>(
+        self, shadow_frame, inst, inst_data, &result_register);
     UPDATE_HANDLER_TABLE();
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INVOKE_STATIC) {
-    bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+    bool success = DoInvoke<kStatic, false, do_access_check>(
+        self, shadow_frame, inst, inst_data, &result_register);
     UPDATE_HANDLER_TABLE();
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INVOKE_STATIC_RANGE) {
-    bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+    bool success = DoInvoke<kStatic, true, do_access_check>(
+        self, shadow_frame, inst, inst_data, &result_register);
     UPDATE_HANDLER_TABLE();
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_QUICK) {
-    bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, inst_data, &result_register);
+    bool success = DoInvokeVirtualQuick<false>(
+        self, shadow_frame, inst, inst_data, &result_register);
     UPDATE_HANDLER_TABLE();
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_RANGE_QUICK) {
-    bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, inst_data, &result_register);
+    bool success = DoInvokeVirtualQuick<true>(
+        self, shadow_frame, inst, inst_data, &result_register);
     UPDATE_HANDLER_TABLE();
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(NEG_INT)
-    shadow_frame.SetVReg(inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    shadow_frame.SetVReg(
+        inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
     ADVANCE(1);
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(NOT_INT)
-    shadow_frame.SetVReg(inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    shadow_frame.SetVReg(
+        inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
     ADVANCE(1);
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(NEG_LONG)
-    shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    shadow_frame.SetVRegLong(
+        inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
     ADVANCE(1);
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(NOT_LONG)
-    shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    shadow_frame.SetVRegLong(
+        inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
     ADVANCE(1);
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(NEG_FLOAT)
-    shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+    shadow_frame.SetVRegFloat(
+        inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
     ADVANCE(1);
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(NEG_DOUBLE)
-    shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+    shadow_frame.SetVRegDouble(
+        inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
     ADVANCE(1);
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INT_TO_LONG)
-    shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    shadow_frame.SetVRegLong(
+        inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
     ADVANCE(1);
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INT_TO_FLOAT)
-    shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    shadow_frame.SetVRegFloat(
+        inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
     ADVANCE(1);
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(INT_TO_DOUBLE)
-    shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+    shadow_frame.SetVRegDouble(
+        inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
     ADVANCE(1);
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(LONG_TO_INT)
-    shadow_frame.SetVReg(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    shadow_frame.SetVReg(
+        inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
     ADVANCE(1);
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(LONG_TO_FLOAT)
-    shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    shadow_frame.SetVRegFloat(
+        inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
     ADVANCE(1);
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(LONG_TO_DOUBLE)
-    shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+    shadow_frame.SetVRegDouble(
+        inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
     ADVANCE(1);
   HANDLE_INSTRUCTION_END();
 
@@ -1636,7 +1701,8 @@
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(FLOAT_TO_DOUBLE)
-    shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+    shadow_frame.SetVRegDouble(
+        inst->VRegA_12x(inst_data), shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
     ADVANCE(1);
   HANDLE_INSTRUCTION_END();
 
@@ -1657,7 +1723,8 @@
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(DOUBLE_TO_FLOAT)
-    shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+    shadow_frame.SetVRegFloat(
+        inst->VRegA_12x(inst_data), shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
     ADVANCE(1);
   HANDLE_INSTRUCTION_END();
 
@@ -2213,15 +2280,17 @@
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(DIV_INT_LIT16) {
-    bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
-                               shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+    bool success = DoIntDivide(
+        shadow_frame, inst->VRegA_22s(inst_data), shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+        inst->VRegC_22s());
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
 
   HANDLE_INSTRUCTION_START(REM_INT_LIT16) {
-    bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
-                                  shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+    bool success = DoIntRemainder(
+        shadow_frame, inst->VRegA_22s(inst_data), shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+        inst->VRegC_22s());
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
   }
   HANDLE_INSTRUCTION_END();
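
The goto-table interpreter above and the switch interpreter below implement the same Dalvik semantics with different dispatch strategies: HANDLE_INSTRUCTION_START/ADVANCE jump through a computed-goto label table, while interpreter_switch_impl.cc loops over a conventional switch. A minimal sketch of the two styles, with invented opcodes and handler names rather than ART's:

```cpp
// Contrast of switch dispatch vs. computed-goto dispatch (a GCC/Clang
// extension). Opcodes and handlers here are illustrative only.
#include <cstdint>
#include <cstdio>

enum Opcode : uint8_t { OP_NEG_INT, OP_NOT_INT, OP_HALT };

// Switch style: every instruction funnels through one shared dispatch branch.
int RunSwitch(const uint8_t* pc, int reg) {
  for (;;) {
    switch (*pc++) {
      case OP_NEG_INT: reg = -reg; break;
      case OP_NOT_INT: reg = ~reg; break;
      case OP_HALT:    return reg;
    }
  }
}

// Goto-table style: each handler jumps straight to the next one, so the
// branch predictor sees a separate indirect branch per handler.
int RunGotoTable(const uint8_t* pc, int reg) {
  static const void* handlers[] = {&&neg_int, &&not_int, &&halt};
#define DISPATCH() goto *handlers[*pc++]
  DISPATCH();
neg_int: reg = -reg; DISPATCH();
not_int: reg = ~reg; DISPATCH();
halt:    return reg;
#undef DISPATCH
}

int main() {
  const uint8_t program[] = {OP_NEG_INT, OP_NOT_INT, OP_HALT};
  std::printf("%d %d\n", RunSwitch(program, 5), RunGotoTable(program, 5));
  return 0;
}
```
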
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index fe7ad77..82f0009 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -68,7 +68,7 @@
 
   uint32_t dex_pc = shadow_frame.GetDexPC();
   bool notified_method_entry_event = false;
-  const instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation();
+  const auto* const instrumentation = Runtime::Current()->GetInstrumentation();
   if (LIKELY(dex_pc == 0)) {  // We are entering the method as opposed to deoptimizing.
     if (kIsDebugBuild) {
         self->AssertNoPendingException();
@@ -231,11 +231,11 @@
         self->AllowThreadSuspension();
         const size_t ref_idx = inst->VRegA_11x(inst_data);
         Object* obj_result = shadow_frame.GetVRegReference(ref_idx);
-        if (do_assignability_check && obj_result != NULL) {
+        if (do_assignability_check && obj_result != nullptr) {
           Class* return_type = shadow_frame.GetMethod()->GetReturnType();
           // Re-load since it might have moved.
           obj_result = shadow_frame.GetVRegReference(ref_idx);
-          if (return_type == NULL) {
+          if (return_type == nullptr) {
             // Return the pending exception.
             HANDLE_PENDING_EXCEPTION();
           }
@@ -266,7 +266,7 @@
         int4_t val = inst->VRegB_11n(inst_data);
         shadow_frame.SetVReg(dst, val);
         if (val == 0) {
-          shadow_frame.SetVRegReference(dst, NULL);
+          shadow_frame.SetVRegReference(dst, nullptr);
         }
         inst = inst->Next_1xx();
         break;
@@ -277,7 +277,7 @@
         int16_t val = inst->VRegB_21s();
         shadow_frame.SetVReg(dst, val);
         if (val == 0) {
-          shadow_frame.SetVRegReference(dst, NULL);
+          shadow_frame.SetVRegReference(dst, nullptr);
         }
         inst = inst->Next_2xx();
         break;
@@ -288,7 +288,7 @@
         int32_t val = inst->VRegB_31i();
         shadow_frame.SetVReg(dst, val);
         if (val == 0) {
-          shadow_frame.SetVRegReference(dst, NULL);
+          shadow_frame.SetVRegReference(dst, nullptr);
         }
         inst = inst->Next_3xx();
         break;
@@ -299,7 +299,7 @@
         int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
         shadow_frame.SetVReg(dst, val);
         if (val == 0) {
-          shadow_frame.SetVRegReference(dst, NULL);
+          shadow_frame.SetVRegReference(dst, nullptr);
         }
         inst = inst->Next_2xx();
         break;
@@ -328,7 +328,7 @@
       case Instruction::CONST_STRING: {
         PREAMBLE();
         String* s = ResolveString(self, shadow_frame,  inst->VRegB_21c());
-        if (UNLIKELY(s == NULL)) {
+        if (UNLIKELY(s == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
           shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
@@ -339,7 +339,7 @@
       case Instruction::CONST_STRING_JUMBO: {
         PREAMBLE();
         String* s = ResolveString(self, shadow_frame,  inst->VRegB_31c());
-        if (UNLIKELY(s == NULL)) {
+        if (UNLIKELY(s == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
           shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
@@ -351,7 +351,7 @@
         PREAMBLE();
         Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
                                           self, false, do_access_check);
-        if (UNLIKELY(c == NULL)) {
+        if (UNLIKELY(c == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
           shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
@@ -362,7 +362,7 @@
       case Instruction::MONITOR_ENTER: {
         PREAMBLE();
         Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
-        if (UNLIKELY(obj == NULL)) {
+        if (UNLIKELY(obj == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
         } else {
@@ -374,7 +374,7 @@
       case Instruction::MONITOR_EXIT: {
         PREAMBLE();
         Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
-        if (UNLIKELY(obj == NULL)) {
+        if (UNLIKELY(obj == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
         } else {
@@ -387,11 +387,11 @@
         PREAMBLE();
         Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
                                           self, false, do_access_check);
-        if (UNLIKELY(c == NULL)) {
+        if (UNLIKELY(c == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
           Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
-          if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
+          if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
             ThrowClassCastException(c, obj->GetClass());
             HANDLE_PENDING_EXCEPTION();
           } else {
@@ -404,11 +404,12 @@
         PREAMBLE();
         Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
                                           self, false, do_access_check);
-        if (UNLIKELY(c == NULL)) {
+        if (UNLIKELY(c == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
           Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
-          shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
+          shadow_frame.SetVReg(inst->VRegA_22c(inst_data),
+                               (obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
           inst = inst->Next_2xx();
         }
         break;
@@ -416,7 +417,7 @@
       case Instruction::ARRAY_LENGTH:  {
         PREAMBLE();
         Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
-        if (UNLIKELY(array == NULL)) {
+        if (UNLIKELY(array == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
         } else {
@@ -431,7 +432,7 @@
         Object* obj = AllocObjectFromCode<do_access_check, true>(
             inst->VRegB_21c(), shadow_frame.GetMethod(), self,
             runtime->GetHeap()->GetCurrentAllocator());
-        if (UNLIKELY(obj == NULL)) {
+        if (UNLIKELY(obj == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
           obj->GetClass()->AssertInitializedOrInitializingInThread(self);
@@ -454,7 +455,7 @@
         Object* obj = AllocArrayFromCode<do_access_check, true>(
             inst->VRegC_22c(), length, shadow_frame.GetMethod(), self,
             Runtime::Current()->GetHeap()->GetCurrentAllocator());
-        if (UNLIKELY(obj == NULL)) {
+        if (UNLIKELY(obj == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
           shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
@@ -498,7 +499,7 @@
       case Instruction::THROW: {
         PREAMBLE();
         Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
-        if (UNLIKELY(exception == NULL)) {
+        if (UNLIKELY(exception == nullptr)) {
           ThrowNullPointerException("throw with null exception");
         } else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
           // This should never happen.
@@ -651,7 +652,8 @@
       }
       case Instruction::IF_EQ: {
         PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) == shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) ==
+            shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
             self->AllowThreadSuspension();
@@ -664,7 +666,8 @@
       }
       case Instruction::IF_NE: {
         PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) !=
+            shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
             self->AllowThreadSuspension();
@@ -677,7 +680,8 @@
       }
       case Instruction::IF_LT: {
         PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <
+            shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
             self->AllowThreadSuspension();
@@ -690,7 +694,8 @@
       }
       case Instruction::IF_GE: {
         PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >=
+            shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
             self->AllowThreadSuspension();
@@ -703,7 +708,8 @@
       }
       case Instruction::IF_GT: {
         PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >
+            shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
             self->AllowThreadSuspension();
@@ -716,7 +722,8 @@
       }
       case Instruction::IF_LE: {
         PREAMBLE();
-        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+        if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <=
+            shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
             self->AllowThreadSuspension();
@@ -808,7 +815,7 @@
       case Instruction::AGET_BOOLEAN: {
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == NULL)) {
+        if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
@@ -826,7 +833,7 @@
       case Instruction::AGET_BYTE: {
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == NULL)) {
+        if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
@@ -844,7 +851,7 @@
       case Instruction::AGET_CHAR: {
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == NULL)) {
+        if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
@@ -862,7 +869,7 @@
       case Instruction::AGET_SHORT: {
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == NULL)) {
+        if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
@@ -880,7 +887,7 @@
       case Instruction::AGET: {
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == NULL)) {
+        if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
@@ -898,7 +905,7 @@
       case Instruction::AGET_WIDE:  {
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == NULL)) {
+        if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
@@ -916,7 +923,7 @@
       case Instruction::AGET_OBJECT: {
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == NULL)) {
+        if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
@@ -934,7 +941,7 @@
       case Instruction::APUT_BOOLEAN: {
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == NULL)) {
+        if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
@@ -953,7 +960,7 @@
       case Instruction::APUT_BYTE: {
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == NULL)) {
+        if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
@@ -972,7 +979,7 @@
       case Instruction::APUT_CHAR: {
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == NULL)) {
+        if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
@@ -991,7 +998,7 @@
       case Instruction::APUT_SHORT: {
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == NULL)) {
+        if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
@@ -1010,7 +1017,7 @@
       case Instruction::APUT: {
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == NULL)) {
+        if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
@@ -1029,7 +1036,7 @@
       case Instruction::APUT_WIDE: {
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == NULL)) {
+        if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
@@ -1048,7 +1055,7 @@
       case Instruction::APUT_OBJECT: {
         PREAMBLE();
         Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-        if (UNLIKELY(a == NULL)) {
+        if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
@@ -1066,43 +1073,50 @@
       }
       case Instruction::IGET_BOOLEAN: {
         PREAMBLE();
-        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+            self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IGET_BYTE: {
         PREAMBLE();
-        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(
+            self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IGET_CHAR: {
         PREAMBLE();
-        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(
+            self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IGET_SHORT: {
         PREAMBLE();
-        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(
+            self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IGET: {
         PREAMBLE();
-        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(
+            self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IGET_WIDE: {
         PREAMBLE();
-        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(
+            self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IGET_OBJECT: {
         PREAMBLE();
-        bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(
+            self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
@@ -1150,272 +1164,318 @@
       }
       case Instruction::SGET_BOOLEAN: {
         PREAMBLE();
-        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+            self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::SGET_BYTE: {
         PREAMBLE();
-        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(
+            self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::SGET_CHAR: {
         PREAMBLE();
-        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(
+            self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::SGET_SHORT: {
         PREAMBLE();
-        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(
+            self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::SGET: {
         PREAMBLE();
-        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(
+            self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::SGET_WIDE: {
         PREAMBLE();
-        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(
+            self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::SGET_OBJECT: {
         PREAMBLE();
-        bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(
+            self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IPUT_BOOLEAN: {
         PREAMBLE();
-        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+            transaction_active>(self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IPUT_BYTE: {
         PREAMBLE();
-        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check,
+            transaction_active>(self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IPUT_CHAR: {
         PREAMBLE();
-        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check,
+            transaction_active>(self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IPUT_SHORT: {
         PREAMBLE();
-        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check,
+            transaction_active>(self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IPUT: {
         PREAMBLE();
-        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check,
+            transaction_active>(self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IPUT_WIDE: {
         PREAMBLE();
-        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check,
+            transaction_active>(self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IPUT_OBJECT: {
         PREAMBLE();
-        bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check,
+            transaction_active>(self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IPUT_QUICK: {
         PREAMBLE();
-        bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(shadow_frame, inst, inst_data);
+        bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(
+            shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IPUT_BOOLEAN_QUICK: {
         PREAMBLE();
-        bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(shadow_frame, inst, inst_data);
+        bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(
+            shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IPUT_BYTE_QUICK: {
         PREAMBLE();
-        bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(shadow_frame, inst, inst_data);
+        bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(
+            shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IPUT_CHAR_QUICK: {
         PREAMBLE();
-        bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(shadow_frame, inst, inst_data);
+        bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(
+            shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IPUT_SHORT_QUICK: {
         PREAMBLE();
-        bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(shadow_frame, inst, inst_data);
+        bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(
+            shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IPUT_WIDE_QUICK: {
         PREAMBLE();
-        bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(shadow_frame, inst, inst_data);
+        bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(
+            shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::IPUT_OBJECT_QUICK: {
         PREAMBLE();
-        bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(shadow_frame, inst, inst_data);
+        bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(
+            shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::SPUT_BOOLEAN: {
         PREAMBLE();
-        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+            transaction_active>(self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::SPUT_BYTE: {
         PREAMBLE();
-        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check,
+            transaction_active>(self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::SPUT_CHAR: {
         PREAMBLE();
-        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check,
+            transaction_active>(self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::SPUT_SHORT: {
         PREAMBLE();
-        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check,
+            transaction_active>(self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::SPUT: {
         PREAMBLE();
-        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check,
+            transaction_active>(self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::SPUT_WIDE: {
         PREAMBLE();
-        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check,
+            transaction_active>(self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::SPUT_OBJECT: {
         PREAMBLE();
-        bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+        bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check,
+            transaction_active>(self, shadow_frame, inst, inst_data);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::INVOKE_VIRTUAL: {
         PREAMBLE();
-        bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+        bool success = DoInvoke<kVirtual, false, do_access_check>(
+            self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
         break;
       }
       case Instruction::INVOKE_VIRTUAL_RANGE: {
         PREAMBLE();
-        bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+        bool success = DoInvoke<kVirtual, true, do_access_check>(
+            self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
         break;
       }
       case Instruction::INVOKE_SUPER: {
         PREAMBLE();
-        bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+        bool success = DoInvoke<kSuper, false, do_access_check>(
+            self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
         break;
       }
       case Instruction::INVOKE_SUPER_RANGE: {
         PREAMBLE();
-        bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+        bool success = DoInvoke<kSuper, true, do_access_check>(
+            self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
         break;
       }
       case Instruction::INVOKE_DIRECT: {
         PREAMBLE();
-        bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+        bool success = DoInvoke<kDirect, false, do_access_check>(
+            self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
         break;
       }
       case Instruction::INVOKE_DIRECT_RANGE: {
         PREAMBLE();
-        bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+        bool success = DoInvoke<kDirect, true, do_access_check>(
+            self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
         break;
       }
       case Instruction::INVOKE_INTERFACE: {
         PREAMBLE();
-        bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+        bool success = DoInvoke<kInterface, false, do_access_check>(
+            self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
         break;
       }
       case Instruction::INVOKE_INTERFACE_RANGE: {
         PREAMBLE();
-        bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+        bool success = DoInvoke<kInterface, true, do_access_check>(
+            self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
         break;
       }
       case Instruction::INVOKE_STATIC: {
         PREAMBLE();
-        bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+        bool success = DoInvoke<kStatic, false, do_access_check>(
+            self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
         break;
       }
       case Instruction::INVOKE_STATIC_RANGE: {
         PREAMBLE();
-        bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+        bool success = DoInvoke<kStatic, true, do_access_check>(
+            self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
         break;
       }
       case Instruction::INVOKE_VIRTUAL_QUICK: {
         PREAMBLE();
-        bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, inst_data, &result_register);
+        bool success = DoInvokeVirtualQuick<false>(
+            self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
         break;
       }
       case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
         PREAMBLE();
-        bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, inst_data, &result_register);
+        bool success = DoInvokeVirtualQuick<true>(
+            self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
         break;
       }
       case Instruction::NEG_INT:
         PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+        shadow_frame.SetVReg(
+            inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
         inst = inst->Next_1xx();
         break;
       case Instruction::NOT_INT:
         PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+        shadow_frame.SetVReg(
+            inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
         inst = inst->Next_1xx();
         break;
       case Instruction::NEG_LONG:
         PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+        shadow_frame.SetVRegLong(
+            inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
         inst = inst->Next_1xx();
         break;
       case Instruction::NOT_LONG:
         PREAMBLE();
-        shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+        shadow_frame.SetVRegLong(
+            inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
         inst = inst->Next_1xx();
         break;
       case Instruction::NEG_FLOAT:
         PREAMBLE();
-        shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+        shadow_frame.SetVRegFloat(
+            inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
         inst = inst->Next_1xx();
         break;
       case Instruction::NEG_DOUBLE:
         PREAMBLE();
-        shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+        shadow_frame.SetVRegDouble(
+            inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
         inst = inst->Next_1xx();
         break;
       case Instruction::INT_TO_LONG:
@@ -1500,20 +1560,20 @@
         break;
       case Instruction::INT_TO_BYTE:
         PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
-                             static_cast<int8_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+        shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<int8_t>(
+            shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
         inst = inst->Next_1xx();
         break;
       case Instruction::INT_TO_CHAR:
         PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
-                             static_cast<uint16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+        shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<uint16_t>(
+            shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
         inst = inst->Next_1xx();
         break;
       case Instruction::INT_TO_SHORT:
         PREAMBLE();
-        shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
-                             static_cast<int16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+        shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<int16_t>(
+            shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
         inst = inst->Next_1xx();
         break;
       case Instruction::ADD_INT: {
@@ -2050,14 +2110,16 @@
       case Instruction::DIV_INT_LIT16: {
         PREAMBLE();
         bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
-                                   shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+                                   shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+                                   inst->VRegC_22s());
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
       case Instruction::REM_INT_LIT16: {
         PREAMBLE();
         bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
-                                      shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+                                      shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+                                      inst->VRegC_22s());
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
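
Most of the churn in this file is the mechanical NULL-to-nullptr conversion. The motivation is type safety: NULL is an integer constant that participates in integral conversions, while nullptr has its own type, std::nullptr_t, that converts only to pointers. A minimal sketch (not ART code) of the difference:

```cpp
// Why nullptr: overload resolution treats it as a pointer, never an int.
#include <iostream>

void Track(int)         { std::cout << "int overload\n"; }
void Track(const char*) { std::cout << "pointer overload\n"; }

int main() {
  Track(0);        // int overload, as expected.
  // Track(NULL);  // Picks the int overload or is ambiguous, depending on
                   // how the implementation defines NULL -- the bug class
                   // this cleanup avoids.
  Track(nullptr);  // Always the pointer overload.
  return 0;
}
```
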
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 61def35..f30c93a 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -915,7 +915,7 @@
   Runtime* runtime = Runtime::Current();
   ClassLinker* class_linker = runtime->GetClassLinker();
   mirror::Class* array_class = class_linker->FindArrayClass(self, &element_class);
-  if (UNLIKELY(array_class == NULL)) {
+  if (UNLIKELY(array_class == nullptr)) {
     CHECK(self->IsExceptionPending());
     return;
   }
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 8dffee6..55441c9 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -127,7 +127,7 @@
    * Among other things, this binds to a port to listen for a connection from
    * the debugger.
    *
-   * Returns a newly-allocated JdwpState struct on success, or NULL on failure.
+   * Returns a newly-allocated JdwpState struct on success, or null on failure.
    */
   static JdwpState* Create(const JdwpOptions* options)
       LOCKS_EXCLUDED(Locks::mutator_lock_);
diff --git a/runtime/jdwp/jdwp_expand_buf.cc b/runtime/jdwp/jdwp_expand_buf.cc
index cc85cdd..e492d7e 100644
--- a/runtime/jdwp/jdwp_expand_buf.cc
+++ b/runtime/jdwp/jdwp_expand_buf.cc
@@ -156,7 +156,7 @@
 }
 
 /*
- * Add a UTF8 string as a 4-byte length followed by a non-NULL-terminated
+ * Add a UTF8 string as a 4-byte length followed by a non-null-terminated
  * string.
  *
  * Because these strings are coming out of the VM, it's safe to assume that
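
The comment above documents JDWP's wire format for strings: a 4-byte length prefix followed by the raw UTF-8 bytes, with no terminator (JDWP integers are big-endian on the wire). An illustrative encoder for that layout, not ART's expandBufAddUtf8String:

```cpp
// Length-prefixed, unterminated UTF-8 string as the JDWP expand buffer
// stores it: 4 big-endian length bytes, then the payload.
#include <cstdint>
#include <string>
#include <vector>

void AppendUtf8String(std::vector<uint8_t>* buf, const std::string& s) {
  const uint32_t len = static_cast<uint32_t>(s.size());
  buf->push_back(static_cast<uint8_t>(len >> 24));  // Big-endian prefix.
  buf->push_back(static_cast<uint8_t>(len >> 16));
  buf->push_back(static_cast<uint8_t>(len >> 8));
  buf->push_back(static_cast<uint8_t>(len));
  buf->insert(buf->end(), s.begin(), s.end());      // No trailing NUL.
}
```
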
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 2457f14..8e9ab32 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -133,7 +133,7 @@
 
   if (is_constructor) {
     // If we invoked a constructor (which actually returns void), return the receiver,
-    // unless we threw, in which case we return NULL.
+    // unless we threw, in which case we return null.
     resultTag = JT_OBJECT;
     resultValue = (exceptObjId == 0) ? object_id : 0;
   }
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 8a20e39..da891fe 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -86,10 +86,10 @@
   // Return true if the code cache contains a code ptr.
   bool ContainsCodePtr(const void* ptr) const;
 
-  // Reserve a region of code of size at least "size". Returns nullptr if there is no more room.
+  // Reserve a region of code of size at least "size". Returns null if there is no more room.
   uint8_t* ReserveCode(Thread* self, size_t size) LOCKS_EXCLUDED(lock_);
 
-  // Add a data array of size (end - begin) with the associated contents, returns nullptr if there
+  // Add a data array of size (end - begin) with the associated contents, returns null if there
   // is no more room.
   uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
       LOCKS_EXCLUDED(lock_);
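
Both methods above report cache exhaustion by returning null rather than throwing, so JIT callers must check before writing. A hypothetical caller-side sketch (EmitToCache is invented; the two cache calls match the signatures shown in the header):

```cpp
// Hypothetical JIT emit path: bail out gracefully when the cache is full.
#include <cstring>

#include "jit/jit_code_cache.h"  // ART headers, assuming the runtime/ include path.
#include "thread.h"

bool EmitToCache(art::jit::JitCodeCache* cache, art::Thread* self,
                 const uint8_t* code, size_t code_size,
                 const uint8_t* data, size_t data_size) {
  uint8_t* code_region = cache->ReserveCode(self, code_size);
  if (code_region == nullptr) {
    return false;  // No more room; the method stays on the interpreter.
  }
  std::memcpy(code_region, code, code_size);
  // AddDataArray likewise returns null when the data region is full.
  return cache->AddDataArray(self, data, data + data_size) != nullptr;
}
```
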
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 554a28d..f5a3a6b 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -350,7 +350,7 @@
     ScopedObjectAccess soa(env);
     mirror::Object* obj_field = soa.Decode<mirror::Object*>(jlr_field);
     if (obj_field->GetClass() != mirror::Field::StaticClass()) {
-      // Not even a java.lang.reflect.Field, return nullptr.
+      // Not even a java.lang.reflect.Field, return null. TODO: Is this check necessary?
       return nullptr;
     }
     auto* field = static_cast<mirror::Field*>(obj_field);
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 5516eab..77db404 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -882,7 +882,7 @@
 }
 
 static void BogusMethod() {
-  // You can't pass nullptr function pointers to RegisterNatives.
+  // You can't pass null function pointers to RegisterNatives.
 }
 
 TEST_F(JniInternalTest, RegisterAndUnregisterNatives) {
@@ -1025,13 +1025,13 @@
   env_->set_region_fn(a, size - 1, size, nullptr); \
   ExpectException(aioobe_); \
   \
-  /* It's okay for the buffer to be nullptr as long as the length is 0. */ \
+  /* It's okay for the buffer to be null as long as the length is 0. */ \
   env_->get_region_fn(a, 2, 0, nullptr); \
   /* Even if the offset is invalid... */ \
   env_->get_region_fn(a, 123, 0, nullptr); \
   ExpectException(aioobe_); \
   \
-  /* It's okay for the buffer to be nullptr as long as the length is 0. */ \
+  /* It's okay for the buffer to be null as long as the length is 0. */ \
   env_->set_region_fn(a, 2, 0, nullptr); \
   /* Even if the offset is invalid... */ \
   env_->set_region_fn(a, 123, 0, nullptr); \
@@ -1200,7 +1200,7 @@
 }
 
 TEST_F(JniInternalTest, GetArrayLength) {
-  // Already tested in NewObjectArray/NewPrimitiveArray except for NULL.
+  // Already tested in NewObjectArray/NewPrimitiveArray except for null.
   CheckJniAbortCatcher jni_abort_catcher;
   bool old_check_jni = vm_->SetCheckJniEnabled(false);
   EXPECT_EQ(0, env_->GetArrayLength(nullptr));
@@ -1463,7 +1463,7 @@
   EXPECT_EQ('l', chars[2]);
   EXPECT_EQ('x', chars[3]);
 
-  // It's okay for the buffer to be nullptr as long as the length is 0.
+  // It's okay for the buffer to be null as long as the length is 0.
   env_->GetStringRegion(s, 2, 0, nullptr);
   // Even if the offset is invalid...
   env_->GetStringRegion(s, 123, 0, nullptr);
@@ -1485,7 +1485,7 @@
   EXPECT_EQ('l', bytes[2]);
   EXPECT_EQ('x', bytes[3]);
 
-  // It's okay for the buffer to be nullptr as long as the length is 0.
+  // It's okay for the buffer to be null as long as the length is 0.
   env_->GetStringUTFRegion(s, 2, 0, nullptr);
   // Even if the offset is invalid...
   env_->GetStringUTFRegion(s, 123, 0, nullptr);
@@ -1493,7 +1493,7 @@
 }
 
 TEST_F(JniInternalTest, GetStringUTFChars_ReleaseStringUTFChars) {
-  // Passing in a nullptr jstring is ignored normally, but caught by -Xcheck:jni.
+  // Passing in a null jstring is ignored normally, but caught by -Xcheck:jni.
   bool old_check_jni = vm_->SetCheckJniEnabled(false);
   {
     CheckJniAbortCatcher check_jni_abort_catcher;
@@ -2102,7 +2102,7 @@
   env_->ExceptionClear();
   EXPECT_TRUE(env_->IsInstanceOf(thrown_exception, imse_class));
 
-  // It's an error to call MonitorEnter or MonitorExit on nullptr.
+  // It's an error to call MonitorEnter or MonitorExit on null.
   {
     CheckJniAbortCatcher check_jni_abort_catcher;
     env_->MonitorEnter(nullptr);
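
The tests above pin down a JNI contract: region calls tolerate a null buffer whenever the requested length is zero (nothing is copied), while null handles passed to calls like MonitorEnter are errors that only -Xcheck:jni reliably reports. A sketch of the zero-length case, relying only on the standard JNI API:

```cpp
// Zero-length regions may use a null buffer; this uses the standard
// JNIEnv::GetStringRegion signature and nothing ART-specific.
#include <jni.h>

void CopyPrefix(JNIEnv* env, jstring s, jsize len, jchar* out) {
  if (len == 0) {
    env->GetStringRegion(s, 0, 0, nullptr);  // Legal: nothing to copy.
    return;
  }
  env->GetStringRegion(s, 0, len, out);
}
```
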
diff --git a/runtime/mapping_table.h b/runtime/mapping_table.h
index 79e6e94..dcd5f00 100644
--- a/runtime/mapping_table.h
+++ b/runtime/mapping_table.h
@@ -106,7 +106,7 @@
     const MappingTable* const table_;  // The original table.
     uint32_t element_;  // A value in the range 0 to end_.
     const uint32_t end_;  // Equal to table_->DexToPcSize().
-    const uint8_t* encoded_table_ptr_;  // Either nullptr or points to encoded data after this entry.
+    const uint8_t* encoded_table_ptr_;  // Either null or points to encoded data after this entry.
     uint32_t native_pc_offset_;  // The current value of native pc offset.
     uint32_t dex_pc_;  // The current value of dex pc.
   };
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index edd2888..959bb75 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -190,7 +190,7 @@
 // the expected value, calling munmap if validation fails, giving the
 // reason in error_msg.
 //
-// If the expected_ptr is nullptr, nothing is checked beyond the fact
+// If the expected_ptr is null, nothing is checked beyond the fact
 // that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
 // non-null, we check that pointer is the actual_ptr == expected_ptr,
 // and if not, report in error_msg what the conflict mapping was if
@@ -398,8 +398,8 @@
                     page_aligned_byte_count, prot, false);
 }
 
-MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags, int fd,
-                                 off_t start, bool reuse, const char* filename,
+MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags,
+                                 int fd, off_t start, bool reuse, const char* filename,
                                  std::string* error_msg) {
   CHECK_NE(0, prot);
   CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
@@ -429,7 +429,8 @@
   size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
   // The 'expected_ptr' is modified (if specified, ie non-null) to be page aligned to the file but
   // not necessarily to virtual memory. mmap will page align 'expected' for us.
-  uint8_t* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
+  uint8_t* page_aligned_expected =
+      (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
 
   uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(page_aligned_expected,
                                               page_aligned_byte_count,
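
The hunk above wraps the arithmetic MapFileAtAddress performs to satisfy mmap, which requires page-aligned file offsets: map from the start of the page containing "start", extend the length by the leftover offset, and hand the caller a pointer advanced past it. An illustrative version of just that computation, with RoundUp and kPageSize standing in for ART's helpers and a non-negative "start" assumed:

```cpp
// Page-offset arithmetic for mapping a file range whose offset is not
// page aligned; a sketch of the computation, not ART's MapFileAtAddress.
#include <cstddef>
#include <sys/types.h>

constexpr size_t kPageSize = 4096;  // Assumed page size for the sketch.

constexpr size_t RoundUp(size_t x, size_t n) { return ((x + n - 1) / n) * n; }

struct FileMapPlan {
  off_t aligned_start;       // Page-aligned offset passed to mmap.
  size_t page_offset;        // Distance from page start to the real data.
  size_t mapped_byte_count;  // Whole pages covering the requested range.
};

FileMapPlan PlanFileMap(off_t start, size_t byte_count) {
  const size_t page_offset = static_cast<size_t>(start % kPageSize);
  return {start - static_cast<off_t>(page_offset),
          page_offset,
          RoundUp(byte_count + page_offset, kPageSize)};
}
// The mapping's base plus page_offset is the address the caller asked for.
```
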
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 11b2569..dc6d935 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -53,24 +53,25 @@
 class MemMap {
  public:
   // Request an anonymous region of length 'byte_count' and a requested base address.
-  // Use NULL as the requested base address if you don't care.
+  // Use null as the requested base address if you don't care.
   // "reuse" allows re-mapping an address range from an existing mapping.
   //
   // The word "anonymous" in this context means "not backed by a file". The supplied
   // 'ashmem_name' will be used -- on systems that support it -- to give the mapping
   // a name.
   //
-  // On success, returns returns a MemMap instance.  On failure, returns a NULL;
+  // On success, returns a MemMap instance.  On failure, returns null.
   static MemMap* MapAnonymous(const char* ashmem_name, uint8_t* addr, size_t byte_count, int prot,
                               bool low_4gb, bool reuse, std::string* error_msg);
 
   // Map part of a file, taking care of non-page aligned offsets.  The
   // "start" offset is absolute, not relative.
   //
-  // On success, returns returns a MemMap instance.  On failure, returns a NULL;
+  // On success, returns a MemMap instance.  On failure, returns null.
   static MemMap* MapFile(size_t byte_count, int prot, int flags, int fd, off_t start,
                          const char* filename, std::string* error_msg) {
-    return MapFileAtAddress(NULL, byte_count, prot, flags, fd, start, false, filename, error_msg);
+    return MapFileAtAddress(
+        nullptr, byte_count, prot, flags, fd, start, false, filename, error_msg);
   }
 
   // Map part of a file, taking care of non-page aligned offsets.  The
@@ -79,13 +80,12 @@
   // mapping. "reuse" allows us to create a view into an existing
   // mapping where we do not take ownership of the memory.
   //
-  // On success, returns returns a MemMap instance.  On failure, returns a
-  // nullptr;
+  // On success, returns a MemMap instance.  On failure, returns null.
   static MemMap* MapFileAtAddress(uint8_t* addr, size_t byte_count, int prot, int flags, int fd,
                                   off_t start, bool reuse, const char* filename,
                                   std::string* error_msg);
 
-  // Releases the memory mapping
+  // Releases the memory mapping.
   ~MemMap() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
 
   const std::string& GetName() const {
diff --git a/runtime/memory_region.cc b/runtime/memory_region.cc
index 06eba0f..a5c70c3 100644
--- a/runtime/memory_region.cc
+++ b/runtime/memory_region.cc
@@ -25,7 +25,7 @@
 namespace art {
 
 void MemoryRegion::CopyFrom(size_t offset, const MemoryRegion& from) const {
-  CHECK(from.pointer() != NULL);
+  CHECK(from.pointer() != nullptr);
   CHECK_GT(from.size(), 0U);
   CHECK_GE(this->size(), from.size());
   CHECK_LE(offset, this->size() - from.size());
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 5fc96ad..0f306e8 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -48,7 +48,7 @@
 
 inline Class* ArtMethod::GetDeclaringClass() {
   Class* result = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, declaring_class_));
-  DCHECK(result != NULL) << this;
+  DCHECK(result != nullptr) << this;
   DCHECK(result->IsIdxLoaded() || result->IsErroneous()) << this;
   return result;
 }
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 9483ba6..543cf9b 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -362,7 +362,7 @@
   Runtime* runtime = Runtime::Current();
   ClassLinker* class_linker = runtime->GetClassLinker();
   const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this, pointer_size);
-  // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
+  // On failure, instead of null we get the quick-generic-jni-trampoline for native methods
   // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
   // for non-native methods.
   if (class_linker->IsQuickToInterpreterBridge(code) ||
@@ -503,7 +503,7 @@
 
   const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
   ClassLinker* class_linker = runtime->GetClassLinker();
-  // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
+  // On failure, instead of null we get the quick-generic-jni-trampoline for native methods
   // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
   // for non-native methods. And we really shouldn't see a failure for non-native methods here.
   DCHECK(!class_linker->IsQuickToInterpreterBridge(entry_point));
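
The comments touched in this file describe a lookup that never yields null: failure is signaled by well-known trampoline or bridge entry points instead. A hypothetical sketch of that sentinel-comparison pattern (RealCodeOrNull is not an ART function):

```cpp
// Sentinel entry points instead of null: compare against the known
// trampolines to decide whether real compiled code exists.
const void* RealCodeOrNull(const void* entry_point,
                           const void* generic_jni_trampoline,
                           const void* interpreter_bridge) {
  if (entry_point == generic_jni_trampoline ||
      entry_point == interpreter_bridge) {
    return nullptr;  // No compiled code; a trampoline was returned.
  }
  return entry_point;
}
```
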
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index b899b25..0da5925 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -341,10 +341,10 @@
     return reinterpret_cast<const void*>(code);
   }
 
-  // Actual entry point pointer to compiled oat code or nullptr.
+  // Actual entry point pointer to compiled oat code or null.
   const void* GetQuickOatEntryPoint(size_t pointer_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  // Actual pointer to compiled oat code or nullptr.
+  // Actual pointer to compiled oat code or null.
   const void* GetQuickOatCodePointer(size_t pointer_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index aaa66f9..712286f 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -66,7 +66,7 @@
 
 inline void Class::SetDirectMethods(ObjectArray<ArtMethod>* new_direct_methods)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  DCHECK(NULL == GetFieldObject<ObjectArray<ArtMethod>>(
+  DCHECK(nullptr == GetFieldObject<ObjectArray<ArtMethod>>(
       OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_)));
   DCHECK_NE(0, new_direct_methods->GetLength());
   SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), new_direct_methods);
@@ -85,7 +85,7 @@
 
 // Returns the number of static, private, and constructor methods.
 inline uint32_t Class::NumDirectMethods() {
-  return (GetDirectMethods() != NULL) ? GetDirectMethods()->GetLength() : 0;
+  return (GetDirectMethods() != nullptr) ? GetDirectMethods()->GetLength() : 0;
 }
 
 template<VerifyObjectFlags kVerifyFlags>
@@ -102,7 +102,7 @@
 }
 
 inline uint32_t Class::NumVirtualMethods() {
-  return (GetVirtualMethods() != NULL) ? GetVirtualMethods()->GetLength() : 0;
+  return (GetVirtualMethods() != nullptr) ? GetVirtualMethods()->GetLength() : 0;
 }
 
 template<VerifyObjectFlags kVerifyFlags>
@@ -186,7 +186,7 @@
 }
 
 inline bool Class::Implements(Class* klass) {
-  DCHECK(klass != NULL);
+  DCHECK(klass != nullptr);
   DCHECK(klass->IsInterface()) << PrettyClass(this);
   // All interfaces implemented directly and by our superclass, and
   // recursively all super-interfaces of those interfaces, are listed
@@ -233,8 +233,8 @@
     // If "this" is not also an array, it must be Object.
     // src's super should be java_lang_Object, since it is an array.
     Class* java_lang_Object = src->GetSuperClass();
-    DCHECK(java_lang_Object != NULL) << PrettyClass(src);
-    DCHECK(java_lang_Object->GetSuperClass() == NULL) << PrettyClass(src);
+    DCHECK(java_lang_Object != nullptr) << PrettyClass(src);
+    DCHECK(java_lang_Object->GetSuperClass() == nullptr) << PrettyClass(src);
     return this == java_lang_Object;
   }
   return IsArrayAssignableFromArray(src);
@@ -335,13 +335,13 @@
       return true;
     }
     current = current->GetSuperClass();
-  } while (current != NULL);
+  } while (current != nullptr);
   return false;
 }
 
 inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method) {
   Class* declaring_class = method->GetDeclaringClass();
-  DCHECK(declaring_class != NULL) << PrettyClass(this);
+  DCHECK(declaring_class != nullptr) << PrettyClass(this);
   DCHECK(declaring_class->IsInterface()) << PrettyMethod(method);
   // TODO cache to improve lookup speed
   int32_t iftable_count = GetIfTableCount();
@@ -351,7 +351,7 @@
       return iftable->GetMethodArray(i)->Get(method->GetMethodIndex());
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method) {
@@ -382,7 +382,7 @@
 
 inline int32_t Class::GetIfTableCount() {
   IfTable* iftable = GetIfTable();
-  if (iftable == NULL) {
+  if (iftable == nullptr) {
     return 0;
   }
   return iftable->Count();
@@ -484,7 +484,7 @@
 }
 
 inline void Class::SetVerifyErrorClass(Class* klass) {
-  CHECK(klass != NULL) << PrettyClass(this);
+  CHECK(klass != nullptr) << PrettyClass(this);
   if (Runtime::Current()->IsActiveTransaction()) {
     SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), klass);
   } else {
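
Most null tests in this file are DCHECKs, not CHECKs, and the distinction matters when reading them: CHECK is verified in every build, while DCHECK compiles away outside debug builds, so its condition must not carry side effects the surrounding code depends on. A simplified model of the two macro families (the real ART macros also stream extra context with <<):

    #include <cstdio>
    #include <cstdlib>

    #define CHECK(cond)                                      \
      do {                                                   \
        if (!(cond)) {                                       \
          std::fprintf(stderr, "CHECK failed: %s\n", #cond); \
          std::abort();                                      \
        }                                                    \
      } while (0)

    #ifndef NDEBUG
    #define DCHECK(cond) CHECK(cond)       // debug build: same as CHECK
    #else
    #define DCHECK(cond) do { } while (0)  // release build: compiled away
    #endif

    int main() {
      int* p = nullptr;
      CHECK(p == nullptr);   // verified in every build
      DCHECK(p == nullptr);  // verified only when NDEBUG is not defined
      return 0;
    }
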
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 5005346..18496fd 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -400,7 +400,7 @@
   // Depth of class from java.lang.Object
   uint32_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     uint32_t depth = 0;
-    for (Class* klass = this; klass->GetSuperClass() != NULL; klass = klass->GetSuperClass()) {
+    for (Class* klass = this; klass->GetSuperClass() != nullptr; klass = klass->GetSuperClass()) {
       depth++;
     }
     return depth;
@@ -409,7 +409,7 @@
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   bool IsArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetComponentType<kVerifyFlags, kReadBarrierOption>() != NULL;
+    return GetComponentType<kVerifyFlags, kReadBarrierOption>() != nullptr;
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -437,8 +437,8 @@
   }
 
   void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK(GetComponentType() == NULL);
-    DCHECK(new_component_type != NULL);
+    DCHECK(GetComponentType() == nullptr);
+    DCHECK(new_component_type != nullptr);
     // Component type is invariant: use non-transactional mode without check.
     SetFieldObject<false, false>(ComponentTypeOffset(), new_component_type);
   }
@@ -454,7 +454,7 @@
   }
 
   bool IsObjectClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return !IsPrimitive() && GetSuperClass() == NULL;
+    return !IsPrimitive() && GetSuperClass() == nullptr;
   }
 
   bool IsInstantiableNonArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -611,7 +611,7 @@
   // that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
   // to themselves. Classes for primitive types may not assign to each other.
   ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK(src != NULL);
+    DCHECK(src != nullptr);
     if (this == src) {
       // Can always assign to things of the same type.
       return true;
@@ -638,7 +638,7 @@
   }
 
   bool HasSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetSuperClass() != NULL;
+    return GetSuperClass() != nullptr;
   }
 
   static MemberOffset SuperClassOffset() {
@@ -1103,14 +1103,14 @@
 
   bool ProxyDescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // defining class loader, or NULL for the "bootstrap" system loader
+  // Defining class loader, or null for the "bootstrap" system loader.
   HeapReference<ClassLoader> class_loader_;
 
   // For array classes, the component class object for instanceof/checkcast
-  // (for String[][][], this will be String[][]). NULL for non-array classes.
+  // (for String[][][], this will be String[][]). null for non-array classes.
   HeapReference<Class> component_type_;
 
-  // DexCache of resolved constant pool entries (will be NULL for classes generated by the
+  // DexCache of resolved constant pool entries (will be null for classes generated by the
   // runtime such as arrays and primitive classes).
   HeapReference<DexCache> dex_cache_;
 
@@ -1136,7 +1136,7 @@
   // Descriptor for the class such as "java.lang.Class" or "[C". Lazily initialized by ComputeName
   HeapReference<String> name_;
 
-  // The superclass, or NULL if this is java.lang.Object, an interface or primitive type.
+  // The superclass, or null if this is java.lang.Object, an interface or primitive type.
   HeapReference<Class> super_class_;
 
   // If class verify fails, we must return same error on subsequent tries.
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 1d6846b..228fce5 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -34,10 +34,10 @@
 TEST_F(DexCacheTest, Open) {
   ScopedObjectAccess soa(Thread::Current());
   StackHandleScope<1> hs(soa.Self());
-  ASSERT_TRUE(java_lang_dex_file_ != NULL);
+  ASSERT_TRUE(java_lang_dex_file_ != nullptr);
   Handle<DexCache> dex_cache(
       hs.NewHandle(class_linker_->AllocDexCache(soa.Self(), *java_lang_dex_file_)));
-  ASSERT_TRUE(dex_cache.Get() != NULL);
+  ASSERT_TRUE(dex_cache.Get() != nullptr);
 
   EXPECT_EQ(java_lang_dex_file_->NumStringIds(), dex_cache->NumStrings());
   EXPECT_EQ(java_lang_dex_file_->NumTypeIds(),   dex_cache->NumResolvedTypes());
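
Note the shape of the pointer assertions: ASSERT_TRUE(ptr != nullptr) rather than ASSERT_NE(ptr, nullptr). Both forms work in current gtest, but the boolean form sidesteps the template-argument deduction quirks the EQ/NE macros historically had with null-pointer constants, which is presumably why the sweep keeps it. A minimal, hypothetical test in the same style:

    #include <gtest/gtest.h>

    TEST(PointerAssertionStyle, BooleanForm) {
      int value = 42;
      int* p = &value;
      ASSERT_TRUE(p != nullptr);  // the style used throughout these tests
      EXPECT_EQ(42, *p);
    }
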
diff --git a/runtime/mirror/iftable-inl.h b/runtime/mirror/iftable-inl.h
index d1309d2..b465d07 100644
--- a/runtime/mirror/iftable-inl.h
+++ b/runtime/mirror/iftable-inl.h
@@ -23,7 +23,7 @@
 namespace mirror {
 
 inline void IfTable::SetInterface(int32_t i, Class* interface) {
-  DCHECK(interface != NULL);
+  DCHECK(interface != nullptr);
   DCHECK(interface->IsInterface());
   const size_t idx = i * kMax + kInterface;
   DCHECK_EQ(Get(idx), static_cast<Object*>(nullptr));
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index 4d899d2..1c1c7b3 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -27,7 +27,7 @@
  public:
   ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     Class* interface = GetWithoutChecks((i * kMax) + kInterface)->AsClass();
-    DCHECK(interface != NULL);
+    DCHECK(interface != nullptr);
     return interface;
   }
 
@@ -37,14 +37,14 @@
   ObjectArray<ArtMethod>* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     ObjectArray<ArtMethod>* method_array =
         down_cast<ObjectArray<ArtMethod>*>(Get((i * kMax) + kMethodArray));
-    DCHECK(method_array != NULL);
+    DCHECK(method_array != nullptr);
     return method_array;
   }
 
   size_t GetMethodArrayCount(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     ObjectArray<ArtMethod>* method_array =
         down_cast<ObjectArray<ArtMethod>*>(Get((i * kMax) + kMethodArray));
-    if (method_array == NULL) {
+    if (method_array == nullptr) {
       return 0;
     }
     return method_array->GetLength();
@@ -52,8 +52,8 @@
 
   void SetMethodArray(int32_t i, ObjectArray<ArtMethod>* new_ma)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK(new_ma != NULL);
-    DCHECK(Get((i * kMax) + kMethodArray) == NULL);
+    DCHECK(new_ma != nullptr);
+    DCHECK(Get((i * kMax) + kMethodArray) == nullptr);
     Set<false>((i * kMax) + kMethodArray, new_ma);
   }
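
The index arithmetic above is the entire storage scheme: an IfTable is one flat object array in which each interface occupies kMax consecutive slots, one for the interface class (kInterface) and one for its method array (kMethodArray). A plain sketch of the same layout, with the mirror types reduced to void*:

    #include <cassert>
    #include <vector>

    enum : int { kInterface = 0, kMethodArray = 1, kMax = 2 };

    struct FlatIfTable {
      std::vector<void*> slots;  // length == Count() * kMax

      void* GetInterface(int i) const   { return slots[i * kMax + kInterface]; }
      void* GetMethodArray(int i) const { return slots[i * kMax + kMethodArray]; }
      int Count() const                 { return static_cast<int>(slots.size()) / kMax; }
    };

    int main() {
      int iface, methods;  // stand-ins for the real objects
      FlatIfTable table{{&iface, &methods}};
      assert(table.Count() == 1);
      assert(table.GetInterface(0) == &iface);
      assert(table.GetMethodArray(0) == &methods);
      return 0;
    }
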
 
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index af0e856..2581fad 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -48,7 +48,7 @@
 
 template<VerifyObjectFlags kVerifyFlags>
 inline void Object::SetClass(Class* new_klass) {
-  // new_klass may be NULL prior to class linker initialization.
+  // new_klass may be null prior to class linker initialization.
   // We don't mark the card as this occurs as part of object allocation. Not all objects have
   // backing cards, such as large objects.
   // We use non transactional version since we can't undo this write. We also disable checking as
@@ -179,15 +179,15 @@
 
 template<VerifyObjectFlags kVerifyFlags>
 inline bool Object::VerifierInstanceOf(Class* klass) {
-  DCHECK(klass != NULL);
-  DCHECK(GetClass<kVerifyFlags>() != NULL);
+  DCHECK(klass != nullptr);
+  DCHECK(GetClass<kVerifyFlags>() != nullptr);
   return klass->IsInterface() || InstanceOf(klass);
 }
 
 template<VerifyObjectFlags kVerifyFlags>
 inline bool Object::InstanceOf(Class* klass) {
-  DCHECK(klass != NULL);
-  DCHECK(GetClass<kVerifyNone>() != NULL);
+  DCHECK(klass != nullptr);
+  DCHECK(GetClass<kVerifyNone>() != nullptr);
   return klass->IsAssignableFrom(GetClass<kVerifyFlags>());
 }
 
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 04d0cd8..5dac985 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -203,7 +203,7 @@
       !runtime->GetHeap()->IsObjectValidationEnabled() || !c->IsResolved()) {
     return;
   }
-  for (Class* cur = c; cur != NULL; cur = cur->GetSuperClass()) {
+  for (Class* cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
     ArtField* fields = cur->GetIFields();
     for (size_t i = 0, count = cur->NumInstanceFields(); i < count; ++i) {
       StackHandleScope<1> hs(Thread::Current());
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 30bc1cd..d473816 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -57,14 +57,14 @@
 inline T* ObjectArray<T>::Get(int32_t i) {
   if (!CheckIsValidIndex(i)) {
     DCHECK(Thread::Current()->IsExceptionPending());
-    return NULL;
+    return nullptr;
   }
   return GetFieldObject<T>(OffsetOfElement(i));
 }
 
 template<class T> template<VerifyObjectFlags kVerifyFlags>
 inline bool ObjectArray<T>::CheckAssignable(T* object) {
-  if (object != NULL) {
+  if (object != nullptr) {
     Class* element_class = GetClass<kVerifyFlags>()->GetComponentType();
     if (UNLIKELY(!object->InstanceOf(element_class))) {
       ThrowArrayStoreException(object);
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 747a008..2262af5 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -61,11 +61,12 @@
     Handle<String> string(
         hs.NewHandle(String::AllocFromModifiedUtf8(self, expected_utf16_length, utf8_in)));
     ASSERT_EQ(expected_utf16_length, string->GetLength());
-    ASSERT_TRUE(string->GetCharArray() != NULL);
-    ASSERT_TRUE(string->GetCharArray()->GetData() != NULL);
+    ASSERT_TRUE(string->GetCharArray() != nullptr);
+    ASSERT_TRUE(string->GetCharArray()->GetData() != nullptr);
     // strlen is necessary because the 1-character string "\x00\x00" is interpreted as ""
     ASSERT_TRUE(string->Equals(utf8_in) || (expected_utf16_length == 1 && strlen(utf8_in) == 0));
-    ASSERT_TRUE(string->Equals(StringPiece(utf8_in)) || (expected_utf16_length == 1 && strlen(utf8_in) == 0));
+    ASSERT_TRUE(string->Equals(StringPiece(utf8_in)) ||
+                (expected_utf16_length == 1 && strlen(utf8_in) == 0));
     for (int32_t i = 0; i < expected_utf16_length; i++) {
       EXPECT_EQ(utf16_expected[i], string->UncheckedCharAt(i));
     }
@@ -110,11 +111,11 @@
   Handle<ObjectArray<Object>> oa(
       hs.NewHandle(class_linker_->AllocObjectArray<Object>(soa.Self(), 2)));
   EXPECT_EQ(2, oa->GetLength());
-  EXPECT_TRUE(oa->Get(0) == NULL);
-  EXPECT_TRUE(oa->Get(1) == NULL);
+  EXPECT_TRUE(oa->Get(0) == nullptr);
+  EXPECT_TRUE(oa->Get(1) == nullptr);
   oa->Set<false>(0, oa.Get());
   EXPECT_TRUE(oa->Get(0) == oa.Get());
-  EXPECT_TRUE(oa->Get(1) == NULL);
+  EXPECT_TRUE(oa->Get(1) == nullptr);
   oa->Set<false>(1, oa.Get());
   EXPECT_TRUE(oa->Get(0) == oa.Get());
   EXPECT_TRUE(oa->Get(1) == oa.Get());
@@ -122,17 +123,17 @@
   Class* aioobe = class_linker_->FindSystemClass(soa.Self(),
                                                  "Ljava/lang/ArrayIndexOutOfBoundsException;");
 
-  EXPECT_TRUE(oa->Get(-1) == NULL);
+  EXPECT_TRUE(oa->Get(-1) == nullptr);
   EXPECT_TRUE(soa.Self()->IsExceptionPending());
   EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
   soa.Self()->ClearException();
 
-  EXPECT_TRUE(oa->Get(2) == NULL);
+  EXPECT_TRUE(oa->Get(2) == nullptr);
   EXPECT_TRUE(soa.Self()->IsExceptionPending());
   EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
   soa.Self()->ClearException();
 
-  ASSERT_TRUE(oa->GetClass() != NULL);
+  ASSERT_TRUE(oa->GetClass() != nullptr);
   Handle<mirror::Class> klass(hs.NewHandle(oa->GetClass()));
   ASSERT_EQ(2U, klass->NumDirectInterfaces());
   EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;"),
@@ -308,13 +309,14 @@
   Class* java_util_Arrays = class_linker_->FindSystemClass(soa.Self(), "Ljava/util/Arrays;");
   ArtMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V");
   const DexFile::StringId* string_id = java_lang_dex_file_->FindStringId("[I");
-  ASSERT_TRUE(string_id != NULL);
+  ASSERT_TRUE(string_id != nullptr);
   const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(
       java_lang_dex_file_->GetIndexForStringId(*string_id));
-  ASSERT_TRUE(type_id != NULL);
+  ASSERT_TRUE(type_id != nullptr);
   uint32_t type_idx = java_lang_dex_file_->GetIndexForTypeId(*type_id);
-  Object* array = CheckAndAllocArrayFromCodeInstrumented(type_idx, 3, sort, Thread::Current(), false,
-                                                         Runtime::Current()->GetHeap()->GetCurrentAllocator());
+  Object* array = CheckAndAllocArrayFromCodeInstrumented(
+      type_idx, 3, sort, Thread::Current(), false,
+      Runtime::Current()->GetHeap()->GetCurrentAllocator());
   EXPECT_TRUE(array->IsArrayInstance());
   EXPECT_EQ(3, array->AsArray()->GetLength());
   EXPECT_TRUE(array->GetClass()->IsArrayClass());
@@ -367,36 +369,36 @@
   Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", loader);
   ArtMethod* clinit = klass->FindClassInitializer();
   const DexFile::StringId* klass_string_id = dex_file->FindStringId("LStaticsFromCode;");
-  ASSERT_TRUE(klass_string_id != NULL);
+  ASSERT_TRUE(klass_string_id != nullptr);
   const DexFile::TypeId* klass_type_id = dex_file->FindTypeId(
       dex_file->GetIndexForStringId(*klass_string_id));
-  ASSERT_TRUE(klass_type_id != NULL);
+  ASSERT_TRUE(klass_type_id != nullptr);
 
   const DexFile::StringId* type_string_id = dex_file->FindStringId("Ljava/lang/Object;");
-  ASSERT_TRUE(type_string_id != NULL);
+  ASSERT_TRUE(type_string_id != nullptr);
   const DexFile::TypeId* type_type_id = dex_file->FindTypeId(
       dex_file->GetIndexForStringId(*type_string_id));
-  ASSERT_TRUE(type_type_id != NULL);
+  ASSERT_TRUE(type_type_id != nullptr);
 
   const DexFile::StringId* name_str_id = dex_file->FindStringId("s0");
-  ASSERT_TRUE(name_str_id != NULL);
+  ASSERT_TRUE(name_str_id != nullptr);
 
   const DexFile::FieldId* field_id = dex_file->FindFieldId(
       *klass_type_id, *name_str_id, *type_type_id);
-  ASSERT_TRUE(field_id != NULL);
+  ASSERT_TRUE(field_id != nullptr);
   uint32_t field_idx = dex_file->GetIndexForFieldId(*field_id);
 
   ArtField* field = FindFieldFromCode<StaticObjectRead, true>(field_idx, clinit, Thread::Current(),
                                                               sizeof(HeapReference<Object>));
   Object* s0 = field->GetObj(klass);
-  EXPECT_TRUE(s0 != NULL);
+  EXPECT_TRUE(s0 != nullptr);
 
   Handle<CharArray> char_array(hs.NewHandle(CharArray::Alloc(soa.Self(), 0)));
   field->SetObj<false>(field->GetDeclaringClass(), char_array.Get());
   EXPECT_EQ(char_array.Get(), field->GetObj(klass));
 
-  field->SetObj<false>(field->GetDeclaringClass(), NULL);
-  EXPECT_EQ(NULL, field->GetObj(klass));
+  field->SetObj<false>(field->GetDeclaringClass(), nullptr);
+  EXPECT_EQ(nullptr, field->GetObj(klass));
 
   // TODO: more exhaustive tests of all 6 cases of ArtField::*FromCode
 }
@@ -416,13 +418,15 @@
   AssertString(1, "\xc2\x80",   "\x00\x80",                 0x80);
   AssertString(1, "\xd9\xa6",   "\x06\x66",                 0x0666);
   AssertString(1, "\xdf\xbf",   "\x07\xff",                 0x07ff);
-  AssertString(3, "h\xd9\xa6i", "\x00\x68\x06\x66\x00\x69", (31 * ((31 * 0x68) + 0x0666)) + 0x69);
+  AssertString(3, "h\xd9\xa6i", "\x00\x68\x06\x66\x00\x69",
+               (31 * ((31 * 0x68) + 0x0666)) + 0x69);
 
   // Test three-byte characters.
   AssertString(1, "\xe0\xa0\x80",   "\x08\x00",                 0x0800);
   AssertString(1, "\xe1\x88\xb4",   "\x12\x34",                 0x1234);
   AssertString(1, "\xef\xbf\xbf",   "\xff\xff",                 0xffff);
-  AssertString(3, "h\xe1\x88\xb4i", "\x00\x68\x12\x34\x00\x69", (31 * ((31 * 0x68) + 0x1234)) + 0x69);
+  AssertString(3, "h\xe1\x88\xb4i", "\x00\x68\x12\x34\x00\x69",
+               (31 * ((31 * 0x68) + 0x1234)) + 0x69);
 
   // Test four-byte characters.
   AssertString(2, "\xf0\x9f\x8f\xa0",  "\xd8\x3c\xdf\xe0", (31 * 0xd83c) + 0xdfe0);
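
The magic constants in these expectations are Java's String.hashCode folded by hand: h is accumulated left-to-right over the UTF-16 code units as h = 31*h + c, which for the U+0666 case above gives (31 * ((31 * 0x68) + 0x0666)) + 0x69. A tiny reimplementation that cross-checks the expected values:

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // Java's String.hashCode over UTF-16 code units: h <- 31*h + c.
    int32_t JavaStringHash(std::initializer_list<uint16_t> units) {
      int32_t h = 0;
      for (uint16_t c : units) {
        h = 31 * h + c;
      }
      return h;
    }

    int main() {
      // "h\u0666i" from the tests above.
      assert(JavaStringHash({0x68, 0x0666, 0x69}) == (31 * ((31 * 0x68) + 0x0666)) + 0x69);
      // The surrogate-pair case: the hash runs over both halves of the pair.
      assert(JavaStringHash({0xd83c, 0xdfe0}) == (31 * 0xd83c) + 0xdfe0);
      return 0;
    }
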
@@ -507,9 +511,9 @@
   Handle<ClassLoader> class_loader_2(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader_2)));
 
   Class* klass1 = linker->FindClass(soa.Self(), "LProtoCompare;", class_loader_1);
-  ASSERT_TRUE(klass1 != NULL);
+  ASSERT_TRUE(klass1 != nullptr);
   Class* klass2 = linker->FindClass(soa.Self(), "LProtoCompare2;", class_loader_2);
-  ASSERT_TRUE(klass2 != NULL);
+  ASSERT_TRUE(klass2 != nullptr);
 
   ArtMethod* m1_1 = klass1->GetVirtualMethod(0);
   EXPECT_STREQ(m1_1->GetName(), "m1");
@@ -550,13 +554,13 @@
 
   Class* X = class_linker_->FindClass(soa.Self(), "LX;", class_loader);
   Class* Y = class_linker_->FindClass(soa.Self(), "LY;", class_loader);
-  ASSERT_TRUE(X != NULL);
-  ASSERT_TRUE(Y != NULL);
+  ASSERT_TRUE(X != nullptr);
+  ASSERT_TRUE(Y != nullptr);
 
   Handle<Object> x(hs.NewHandle(X->AllocObject(soa.Self())));
   Handle<Object> y(hs.NewHandle(Y->AllocObject(soa.Self())));
-  ASSERT_TRUE(x.Get() != NULL);
-  ASSERT_TRUE(y.Get() != NULL);
+  ASSERT_TRUE(x.Get() != nullptr);
+  ASSERT_TRUE(y.Get() != nullptr);
 
   EXPECT_TRUE(x->InstanceOf(X));
   EXPECT_FALSE(x->InstanceOf(Y));
@@ -571,8 +575,10 @@
 
   // All array classes implement Cloneable and Serializable.
   Object* array = ObjectArray<Object>::Alloc(soa.Self(), Object_array_class, 1);
-  Class* java_lang_Cloneable = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;");
-  Class* java_io_Serializable = class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;");
+  Class* java_lang_Cloneable =
+      class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;");
+  Class* java_io_Serializable =
+      class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;");
   EXPECT_TRUE(array->InstanceOf(java_lang_Cloneable));
   EXPECT_TRUE(array->InstanceOf(java_io_Serializable));
 }
@@ -622,35 +628,35 @@
   Handle<ClassLoader> class_loader(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader)));
   Class* X = class_linker_->FindClass(soa.Self(), "LX;", class_loader);
   Class* Y = class_linker_->FindClass(soa.Self(), "LY;", class_loader);
-  ASSERT_TRUE(X != NULL);
-  ASSERT_TRUE(Y != NULL);
+  ASSERT_TRUE(X != nullptr);
+  ASSERT_TRUE(Y != nullptr);
 
   Class* YA = class_linker_->FindClass(soa.Self(), "[LY;", class_loader);
   Class* YAA = class_linker_->FindClass(soa.Self(), "[[LY;", class_loader);
-  ASSERT_TRUE(YA != NULL);
-  ASSERT_TRUE(YAA != NULL);
+  ASSERT_TRUE(YA != nullptr);
+  ASSERT_TRUE(YAA != nullptr);
 
   Class* XAA = class_linker_->FindClass(soa.Self(), "[[LX;", class_loader);
-  ASSERT_TRUE(XAA != NULL);
+  ASSERT_TRUE(XAA != nullptr);
 
   Class* O = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
   Class* OA = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;");
   Class* OAA = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/lang/Object;");
   Class* OAAA = class_linker_->FindSystemClass(soa.Self(), "[[[Ljava/lang/Object;");
-  ASSERT_TRUE(O != NULL);
-  ASSERT_TRUE(OA != NULL);
-  ASSERT_TRUE(OAA != NULL);
-  ASSERT_TRUE(OAAA != NULL);
+  ASSERT_TRUE(O != nullptr);
+  ASSERT_TRUE(OA != nullptr);
+  ASSERT_TRUE(OAA != nullptr);
+  ASSERT_TRUE(OAAA != nullptr);
 
   Class* S = class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;");
   Class* SA = class_linker_->FindSystemClass(soa.Self(), "[Ljava/io/Serializable;");
   Class* SAA = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/io/Serializable;");
-  ASSERT_TRUE(S != NULL);
-  ASSERT_TRUE(SA != NULL);
-  ASSERT_TRUE(SAA != NULL);
+  ASSERT_TRUE(S != nullptr);
+  ASSERT_TRUE(SA != nullptr);
+  ASSERT_TRUE(SAA != nullptr);
 
   Class* IA = class_linker_->FindSystemClass(soa.Self(), "[I");
-  ASSERT_TRUE(IA != NULL);
+  ASSERT_TRUE(IA != nullptr);
 
   EXPECT_TRUE(YAA->IsAssignableFrom(YAA));  // identity
   EXPECT_TRUE(XAA->IsAssignableFrom(YAA));  // element superclass
@@ -673,60 +679,62 @@
   ScopedObjectAccess soa(Thread::Current());
   StackHandleScope<1> hs(soa.Self());
   Handle<String> s(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
-  ASSERT_TRUE(s.Get() != NULL);
+  ASSERT_TRUE(s.Get() != nullptr);
   Class* c = s->GetClass();
-  ASSERT_TRUE(c != NULL);
+  ASSERT_TRUE(c != nullptr);
 
   // Wrong type.
-  EXPECT_TRUE(c->FindDeclaredInstanceField("count", "J") == NULL);
-  EXPECT_TRUE(c->FindInstanceField("count", "J") == NULL);
+  EXPECT_TRUE(c->FindDeclaredInstanceField("count", "J") == nullptr);
+  EXPECT_TRUE(c->FindInstanceField("count", "J") == nullptr);
 
   // Wrong name.
-  EXPECT_TRUE(c->FindDeclaredInstanceField("Count", "I") == NULL);
-  EXPECT_TRUE(c->FindInstanceField("Count", "I") == NULL);
+  EXPECT_TRUE(c->FindDeclaredInstanceField("Count", "I") == nullptr);
+  EXPECT_TRUE(c->FindInstanceField("Count", "I") == nullptr);
 
   // Right name and type.
   ArtField* f1 = c->FindDeclaredInstanceField("count", "I");
   ArtField* f2 = c->FindInstanceField("count", "I");
-  EXPECT_TRUE(f1 != NULL);
-  EXPECT_TRUE(f2 != NULL);
+  EXPECT_TRUE(f1 != nullptr);
+  EXPECT_TRUE(f2 != nullptr);
   EXPECT_EQ(f1, f2);
 
   // TODO: check that s.count == 3.
 
   // Ensure that we handle superclass fields correctly...
   c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/StringBuilder;");
-  ASSERT_TRUE(c != NULL);
+  ASSERT_TRUE(c != nullptr);
   // No StringBuilder.count...
-  EXPECT_TRUE(c->FindDeclaredInstanceField("count", "I") == NULL);
+  EXPECT_TRUE(c->FindDeclaredInstanceField("count", "I") == nullptr);
   // ...but there is an AbstractStringBuilder.count.
-  EXPECT_TRUE(c->FindInstanceField("count", "I") != NULL);
+  EXPECT_TRUE(c->FindInstanceField("count", "I") != nullptr);
 }
 
 TEST_F(ObjectTest, FindStaticField) {
   ScopedObjectAccess soa(Thread::Current());
   StackHandleScope<4> hs(soa.Self());
   Handle<String> s(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
-  ASSERT_TRUE(s.Get() != NULL);
+  ASSERT_TRUE(s.Get() != nullptr);
   Handle<Class> c(hs.NewHandle(s->GetClass()));
-  ASSERT_TRUE(c.Get() != NULL);
+  ASSERT_TRUE(c.Get() != nullptr);
 
   // Wrong type.
-  EXPECT_TRUE(c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "I") == NULL);
-  EXPECT_TRUE(mirror::Class::FindStaticField(soa.Self(), c, "CASE_INSENSITIVE_ORDER", "I") == NULL);
+  EXPECT_TRUE(c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "I") == nullptr);
+  EXPECT_TRUE(mirror::Class::FindStaticField(
+      soa.Self(), c, "CASE_INSENSITIVE_ORDER", "I") == nullptr);
 
   // Wrong name.
-  EXPECT_TRUE(c->FindDeclaredStaticField("cASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;") == NULL);
+  EXPECT_TRUE(c->FindDeclaredStaticField(
+      "cASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;") == nullptr);
   EXPECT_TRUE(
       mirror::Class::FindStaticField(soa.Self(), c, "cASE_INSENSITIVE_ORDER",
-                                     "Ljava/util/Comparator;") == NULL);
+                                     "Ljava/util/Comparator;") == nullptr);
 
   // Right name and type.
   ArtField* f1 = c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
   ArtField* f2 = mirror::Class::FindStaticField(soa.Self(), c, "CASE_INSENSITIVE_ORDER",
                                                 "Ljava/util/Comparator;");
-  EXPECT_TRUE(f1 != NULL);
-  EXPECT_TRUE(f2 != NULL);
+  EXPECT_TRUE(f1 != nullptr);
+  EXPECT_TRUE(f2 != nullptr);
   EXPECT_EQ(f1, f2);
 
   // TODO: test static fields via superclasses.
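
The StringBuilder half of this test is the interesting one: FindDeclaredInstanceField consults only the class itself, while FindInstanceField also walks the superclass chain, which is why StringBuilder has no declared "count" yet an inherited one is found. Schematically (a sketch with one field per class, not the real lookup code):

    #include <cassert>
    #include <cstring>

    struct FakeClass {
      const char* declared_field;  // at most one field, for illustration
      FakeClass* super;
    };

    const char* FindDeclared(const FakeClass* c, const char* name) {
      return (c->declared_field != nullptr && std::strcmp(c->declared_field, name) == 0)
                 ? c->declared_field
                 : nullptr;
    }

    const char* Find(const FakeClass* c, const char* name) {
      for (; c != nullptr; c = c->super) {  // walk the superclass chain
        if (const char* f = FindDeclared(c, name)) {
          return f;
        }
      }
      return nullptr;
    }

    int main() {
      FakeClass abstract_sb{"count", nullptr};
      FakeClass string_builder{nullptr, &abstract_sb};
      assert(FindDeclared(&string_builder, "count") == nullptr);  // no StringBuilder.count...
      assert(Find(&string_builder, "count") != nullptr);          // ...but a superclass has one
      return 0;
    }
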
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index ec2b495..96f6a53 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -30,7 +30,7 @@
 
 void StackTraceElement::SetClass(Class* java_lang_StackTraceElement) {
   CHECK(java_lang_StackTraceElement_.IsNull());
-  CHECK(java_lang_StackTraceElement != NULL);
+  CHECK(java_lang_StackTraceElement != nullptr);
   java_lang_StackTraceElement_ = GcRoot<Class>(java_lang_StackTraceElement);
 }
 
@@ -44,7 +44,7 @@
                                             int32_t line_number) {
   StackTraceElement* trace =
       down_cast<StackTraceElement*>(GetStackTraceElement()->AllocObject(self));
-  if (LIKELY(trace != NULL)) {
+  if (LIKELY(trace != nullptr)) {
     if (Runtime::Current()->IsActiveTransaction()) {
       trace->Init<true>(declaring_class, method_name, file_name, line_number);
     } else {
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 4a95519..b367cff 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -50,7 +50,7 @@
 inline void String::SetArray(CharArray* new_array) {
   // Array is invariant so use non-transactional mode. Also disable check as we may run inside
   // a transaction.
-  DCHECK(new_array != NULL);
+  DCHECK(new_array != nullptr);
   SetFieldObject<false, false>(OFFSET_OF_OBJECT_MEMBER(String, array_), new_array);
 }
 
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index bd6a63c..b7fd240 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -53,7 +53,7 @@
 
 void String::SetClass(Class* java_lang_String) {
   CHECK(java_lang_String_.IsNull());
-  CHECK(java_lang_String != NULL);
+  CHECK(java_lang_String != nullptr);
   java_lang_String_ = GcRoot<Class>(java_lang_String);
 }
 
@@ -137,7 +137,7 @@
   if (this == that) {
     // Quick reference equality test
     return true;
-  } else if (that == NULL) {
+  } else if (that == nullptr) {
     // Null isn't an instanceof anything
     return false;
   } else if (this->GetLength() != that->GetLength()) {
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index b564649..ca94644 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -45,7 +45,7 @@
   CHECK(cause != nullptr);
   CHECK(cause != this);
   Throwable* current_cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
-  CHECK(current_cause == NULL || current_cause == this);
+  CHECK(current_cause == nullptr || current_cause == this);
   if (Runtime::Current()->IsActiveTransaction()) {
     SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), cause);
   } else {
@@ -80,7 +80,7 @@
   std::string result(PrettyTypeOf(this));
   result += ": ";
   String* msg = GetDetailMessage();
-  if (msg != NULL) {
+  if (msg != nullptr) {
     result += msg->ToModifiedUtf8();
   }
   result += "\n";
@@ -135,7 +135,7 @@
 
 void Throwable::SetClass(Class* java_lang_Throwable) {
   CHECK(java_lang_Throwable_.IsNull());
-  CHECK(java_lang_Throwable != NULL);
+  CHECK(java_lang_Throwable != nullptr);
   java_lang_Throwable_ = GcRoot<Class>(java_lang_Throwable);
 }
 
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 1a80ded..4b41225 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -68,11 +68,11 @@
  * at any given time.
  */
 
-bool (*Monitor::is_sensitive_thread_hook_)() = NULL;
+bool (*Monitor::is_sensitive_thread_hook_)() = nullptr;
 uint32_t Monitor::lock_profiling_threshold_ = 0;
 
 bool Monitor::IsSensitiveThread() {
-  if (is_sensitive_thread_hook_ != NULL) {
+  if (is_sensitive_thread_hook_ != nullptr) {
     return (*is_sensitive_thread_hook_)();
   }
   return false;
@@ -90,9 +90,9 @@
       owner_(owner),
       lock_count_(0),
       obj_(GcRoot<mirror::Object>(obj)),
-      wait_set_(NULL),
+      wait_set_(nullptr),
       hash_code_(hash_code),
-      locking_method_(NULL),
+      locking_method_(nullptr),
       locking_dex_pc_(0),
       monitor_id_(MonitorPool::ComputeMonitorId(this, self)) {
 #ifdef __LP64__
@@ -113,9 +113,9 @@
       owner_(owner),
       lock_count_(0),
       obj_(GcRoot<mirror::Object>(obj)),
-      wait_set_(NULL),
+      wait_set_(nullptr),
       hash_code_(hash_code),
-      locking_method_(NULL),
+      locking_method_(nullptr),
       locking_dex_pc_(0),
       monitor_id_(id) {
 #ifdef __LP64__
@@ -183,9 +183,9 @@
 
 void Monitor::AppendToWaitSet(Thread* thread) {
   DCHECK(owner_ == Thread::Current());
-  DCHECK(thread != NULL);
+  DCHECK(thread != nullptr);
   DCHECK(thread->GetWaitNext() == nullptr) << thread->GetWaitNext();
-  if (wait_set_ == NULL) {
+  if (wait_set_ == nullptr) {
     wait_set_ = thread;
     return;
   }
@@ -200,8 +200,8 @@
 
 void Monitor::RemoveFromWaitSet(Thread *thread) {
   DCHECK(owner_ == Thread::Current());
-  DCHECK(thread != NULL);
-  if (wait_set_ == NULL) {
+  DCHECK(thread != nullptr);
+  if (wait_set_ == nullptr) {
     return;
   }
   if (wait_set_ == thread) {
@@ -211,7 +211,7 @@
   }
 
   Thread* t = wait_set_;
-  while (t->GetWaitNext() != NULL) {
+  while (t->GetWaitNext() != nullptr) {
     if (t->GetWaitNext() == thread) {
       t->SetWaitNext(thread->GetWaitNext());
       thread->SetWaitNext(nullptr);
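
AppendToWaitSet and RemoveFromWaitSet above maintain an intrusive singly linked list threaded through each Thread's wait-next pointer: append walks to the tail, and removal must handle the head specially. The same bookkeeping in a self-contained sketch (FakeThread is illustrative; the pointer-to-pointer walk folds the head case into the loop):

    #include <cassert>

    struct FakeThread {
      FakeThread* wait_next = nullptr;
    };

    // Append t to the tail of the intrusive list rooted at *wait_set.
    void Append(FakeThread** wait_set, FakeThread* t) {
      assert(t->wait_next == nullptr);
      while (*wait_set != nullptr) {
        wait_set = &(*wait_set)->wait_next;
      }
      *wait_set = t;
    }

    // Unlink t if present; visiting the root slot first covers the head case.
    void Remove(FakeThread** wait_set, FakeThread* t) {
      for (; *wait_set != nullptr; wait_set = &(*wait_set)->wait_next) {
        if (*wait_set == t) {
          *wait_set = t->wait_next;
          t->wait_next = nullptr;
          return;
        }
      }
    }

    int main() {
      FakeThread a, b, c;
      FakeThread* wait_set = nullptr;
      Append(&wait_set, &a);
      Append(&wait_set, &b);
      Append(&wait_set, &c);
      Remove(&wait_set, &b);
      assert(wait_set == &a && a.wait_next == &c && c.wait_next == nullptr);
      return 0;
    }
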
@@ -253,7 +253,8 @@
     self->SetMonitorEnterObject(GetObject());
     {
       ScopedThreadStateChange tsc(self, kBlocked);  // Change to blocked and give up mutator_lock_.
-      MutexLock mu2(self, monitor_lock_);  // Reacquire monitor_lock_ without mutator_lock_ for Wait.
+      // Reacquire monitor_lock_ without mutator_lock_ for Wait.
+      MutexLock mu2(self, monitor_lock_);
       if (owner_ != nullptr) {  // Did the owner_ give the lock up?
         if (ATRACE_ENABLED()) {
           std::string name;
@@ -311,8 +312,8 @@
 }
 
 static std::string ThreadToString(Thread* thread) {
-  if (thread == NULL) {
-    return "NULL";
+  if (thread == nullptr) {
+    return "nullptr";
   }
   std::ostringstream oss;
   // TODO: alternatively, we could just return the thread's name.
@@ -322,7 +323,7 @@
 
 void Monitor::FailedUnlock(mirror::Object* o, Thread* expected_owner, Thread* found_owner,
                            Monitor* monitor) {
-  Thread* current_owner = NULL;
+  Thread* current_owner = nullptr;
   std::string current_owner_string;
   std::string expected_owner_string;
   std::string found_owner_string;
@@ -331,14 +332,14 @@
     // Acquire thread list lock so threads won't disappear from under us.
     MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
     // Re-read owner now that we hold lock.
-    current_owner = (monitor != NULL) ? monitor->GetOwner() : NULL;
+    current_owner = (monitor != nullptr) ? monitor->GetOwner() : nullptr;
     // Get short descriptions of the threads involved.
     current_owner_string = ThreadToString(current_owner);
     expected_owner_string = ThreadToString(expected_owner);
     found_owner_string = ThreadToString(found_owner);
   }
-  if (current_owner == NULL) {
-    if (found_owner == NULL) {
+  if (current_owner == nullptr) {
+    if (found_owner == nullptr) {
       ThrowIllegalMonitorStateExceptionF("unlock of unowned monitor on object of type '%s'"
                                          " on thread '%s'",
                                          PrettyTypeOf(o).c_str(),
@@ -352,7 +353,7 @@
                                          expected_owner_string.c_str());
     }
   } else {
-    if (found_owner == NULL) {
+    if (found_owner == nullptr) {
       // Race: originally there was no owner, there is now
       ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                          " (originally believed to be unowned) on thread '%s'",
@@ -380,14 +381,14 @@
 }
 
 bool Monitor::Unlock(Thread* self) {
-  DCHECK(self != NULL);
+  DCHECK(self != nullptr);
   MutexLock mu(self, monitor_lock_);
   Thread* owner = owner_;
   if (owner == self) {
     // We own the monitor, so nobody else can be in here.
     if (lock_count_ == 0) {
-      owner_ = NULL;
-      locking_method_ = NULL;
+      owner_ = nullptr;
+      locking_method_ = nullptr;
       locking_dex_pc_ = 0;
       // Wake a contender.
       monitor_contenders_.Signal(self);
@@ -406,7 +407,7 @@
 
 void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
                    bool interruptShouldThrow, ThreadState why) {
-  DCHECK(self != NULL);
+  DCHECK(self != nullptr);
   DCHECK(why == kTimedWaiting || why == kWaiting || why == kSleeping);
 
   monitor_lock_.Lock(self);
@@ -446,9 +447,9 @@
   ++num_waiters_;
   int prev_lock_count = lock_count_;
   lock_count_ = 0;
-  owner_ = NULL;
+  owner_ = nullptr;
   mirror::ArtMethod* saved_method = locking_method_;
-  locking_method_ = NULL;
+  locking_method_ = nullptr;
   uintptr_t saved_dex_pc = locking_dex_pc_;
   locking_dex_pc_ = 0;
 
@@ -465,7 +466,7 @@
     MutexLock mu(self, *self->GetWaitMutex());
 
     // Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is
-    // non-NULL a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
+    // non-null a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
     // up.
     DCHECK(self->GetWaitMonitor() == nullptr);
     self->SetWaitMonitor(this);
@@ -538,13 +539,13 @@
       self->SetInterruptedLocked(false);
     }
     if (interruptShouldThrow) {
-      self->ThrowNewException("Ljava/lang/InterruptedException;", NULL);
+      self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
     }
   }
 }
 
 void Monitor::Notify(Thread* self) {
-  DCHECK(self != NULL);
+  DCHECK(self != nullptr);
   MutexLock mu(self, monitor_lock_);
   // Make sure that we hold the lock.
   if (owner_ != self) {
@@ -552,7 +553,7 @@
     return;
   }
   // Signal the first waiting thread in the wait set.
-  while (wait_set_ != NULL) {
+  while (wait_set_ != nullptr) {
     Thread* thread = wait_set_;
     wait_set_ = thread->GetWaitNext();
     thread->SetWaitNext(nullptr);
@@ -567,7 +568,7 @@
 }
 
 void Monitor::NotifyAll(Thread* self) {
-  DCHECK(self != NULL);
+  DCHECK(self != nullptr);
   MutexLock mu(self, monitor_lock_);
   // Make sure that we hold the lock.
   if (owner_ != self) {
@@ -575,7 +576,7 @@
     return;
   }
   // Signal all threads in the wait set.
-  while (wait_set_ != NULL) {
+  while (wait_set_ != nullptr) {
     Thread* thread = wait_set_;
     wait_set_ = thread->GetWaitNext();
     thread->SetWaitNext(nullptr);
@@ -625,7 +626,7 @@
       obj->SetLockWord(new_lw, false);
       VLOG(monitor) << "Deflated" << obj << " to empty lock word";
     }
-    // The monitor is deflated, mark the object as nullptr so that we know to delete it during the
+    // The monitor is deflated; mark the object as null so that we know to delete it during the
     // next GC.
     monitor->obj_ = GcRoot<mirror::Object>(nullptr);
   }
@@ -697,8 +698,8 @@
 }
 
 mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
-  DCHECK(self != NULL);
-  DCHECK(obj != NULL);
+  DCHECK(self != nullptr);
+  DCHECK(obj != nullptr);
   obj = FakeLock(obj);
   uint32_t thread_id = self->GetThreadId();
   size_t contention_count = 0;
@@ -772,8 +773,8 @@
 }
 
 bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
-  DCHECK(self != NULL);
-  DCHECK(obj != NULL);
+  DCHECK(self != nullptr);
+  DCHECK(obj != nullptr);
   obj = FakeUnlock(obj);
   StackHandleScope<1> hs(self);
   Handle<mirror::Object> h_obj(hs.NewHandle(obj));
@@ -979,11 +980,11 @@
   // This is used to implement JDWP's ThreadReference.CurrentContendedMonitor, and has a bizarre
   // definition of contended that includes a monitor a thread is trying to enter...
   mirror::Object* result = thread->GetMonitorEnterObject();
-  if (result == NULL) {
+  if (result == nullptr) {
     // ...but also a monitor that the thread is waiting on.
     MutexLock mu(Thread::Current(), *thread->GetWaitMutex());
     Monitor* monitor = thread->GetWaitMonitor();
-    if (monitor != NULL) {
+    if (monitor != nullptr) {
       result = monitor->GetObject();
     }
   }
@@ -993,7 +994,7 @@
 void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
                          void* callback_context, bool abort_on_failure) {
   mirror::ArtMethod* m = stack_visitor->GetMethod();
-  CHECK(m != NULL);
+  CHECK(m != nullptr);
 
   // Native methods are an easy special case.
   // TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
@@ -1013,7 +1014,7 @@
 
   // Is there any reason to believe there's any synchronization in this method?
   const DexFile::CodeItem* code_item = m->GetCodeItem();
-  CHECK(code_item != NULL) << PrettyMethod(m);
+  CHECK(code_item != nullptr) << PrettyMethod(m);
   if (code_item->tries_size_ == 0) {
     return;  // No "tries" implies no synchronization, so no held locks to report.
   }
@@ -1088,13 +1089,13 @@
 void Monitor::TranslateLocation(mirror::ArtMethod* method, uint32_t dex_pc,
                                 const char** source_file, uint32_t* line_number) const {
   // If method is null, location is unknown
-  if (method == NULL) {
+  if (method == nullptr) {
     *source_file = "";
     *line_number = 0;
     return;
   }
   *source_file = method->GetDeclaringClassSourceFile();
-  if (*source_file == NULL) {
+  if (*source_file == nullptr) {
     *source_file = "";
   }
   *line_number = method->GetLineNumFromDexPC(dex_pc);
@@ -1103,7 +1104,7 @@
 uint32_t Monitor::GetOwnerThreadId() {
   MutexLock mu(Thread::Current(), monitor_lock_);
   Thread* owner = owner_;
-  if (owner != NULL) {
+  if (owner != nullptr) {
     return owner->GetThreadId();
   } else {
     return ThreadList::kInvalidThreadId;
@@ -1185,7 +1186,7 @@
   if (Monitor::Deflate(args->self, object)) {
     DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
     ++args->deflate_count;
-    // If we deflated, return nullptr so that the monitor gets removed from the array.
+    // If we deflated, return null so that the monitor gets removed from the array.
     return nullptr;
   }
   return object;  // Monitor was not deflated.
@@ -1198,7 +1199,7 @@
   return args.deflate_count;
 }
 
-MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(NULL), entry_count_(0) {
+MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(nullptr), entry_count_(0) {
   DCHECK(obj != nullptr);
   LockWord lock_word = obj->GetLockWord(true);
   switch (lock_word.GetState()) {
@@ -1217,7 +1218,7 @@
       Monitor* mon = lock_word.FatLockMonitor();
       owner_ = mon->owner_;
       entry_count_ = 1 + mon->lock_count_;
-      for (Thread* waiter = mon->wait_set_; waiter != NULL; waiter = waiter->GetWaitNext()) {
+      for (Thread* waiter = mon->wait_set_; waiter != nullptr; waiter = waiter->GetWaitNext()) {
         waiters_.push_back(waiter);
       }
       break;
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index d89290b..48c9cce 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -88,7 +88,7 @@
   cp = EventLogWriteInt(cp, line_number);
 
   // Emit the lock owner source code file name, <= 37 bytes.
-  if (owner_filename == NULL) {
+  if (owner_filename == nullptr) {
     owner_filename = "";
   } else if (strcmp(filename, owner_filename) == 0) {
     // Common case, so save on log space.
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 8ae5a54..4ab4e86 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -138,7 +138,8 @@
     for (size_t index = 0; index < num_chunks_; ++index) {
       uintptr_t chunk_addr = *(monitor_chunks_.LoadRelaxed() + index);
       if (IsInChunk(chunk_addr, mon)) {
-        return OffsetToMonitorId(reinterpret_cast<uintptr_t>(mon) - chunk_addr + index * kChunkSize);
+        return OffsetToMonitorId(
+            reinterpret_cast<uintptr_t>(mon) - chunk_addr + index * kChunkSize);
       }
     }
     LOG(FATAL) << "Did not find chunk that contains monitor.";
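
The reflowed return above computes a pool-wide offset for a raw Monitor*: locate the chunk containing the pointer, take the offset within that chunk, and bias it by index * kChunkSize so that offsets from different chunks cannot collide. A simplified model of that mapping (the constant and function name here are illustrative):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr uintptr_t kChunkSize = 4096;

    // Pool-wide offset of `mon`, given its chunk's base address and that
    // chunk's position in the chunk table. Chunks may live anywhere in
    // memory; the result depends only on (index, offset-within-chunk).
    uintptr_t PoolOffset(uintptr_t chunk_addr, size_t index, uintptr_t mon) {
      assert(chunk_addr <= mon && mon < chunk_addr + kChunkSize);
      return (mon - chunk_addr) + index * kChunkSize;
    }

    int main() {
      // A monitor 128 bytes into the third chunk (index 2):
      assert(PoolOffset(0x700000, 2, 0x700080) == 2 * kChunkSize + 0x80);
      return 0;
    }
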
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 2351463..30cb2d8 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -116,8 +116,8 @@
       ScopedObjectAccess soa(self);
 
       monitor_test_->thread_ = self;        // Pass the Thread.
-      monitor_test_->object_.Get()->MonitorEnter(self);     // Lock the object. This should transition
-      LockWord lock_after = monitor_test_->object_.Get()->GetLockWord(false);     // it to thinLocked.
+      monitor_test_->object_.Get()->MonitorEnter(self);  // Lock the object. This should transition
+      LockWord lock_after = monitor_test_->object_.Get()->GetLockWord(false);  // it to thinLocked.
       LockWord::LockState new_state = lock_after.GetState();
 
       // Cannot use ASSERT only, as analysis thinks we'll keep holding the mutex.
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 87ae64d..4f97d20 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -109,7 +109,7 @@
 //
 //   NullableScopedUtfChars name(env, javaName);
 //   if (env->ExceptionCheck()) {
-//       return NULL;
+//       return nullptr;
 //   }
 //   // ... use name.c_str()
 //
@@ -117,7 +117,7 @@
 class NullableScopedUtfChars {
  public:
   NullableScopedUtfChars(JNIEnv* env, jstring s) : mEnv(env), mString(s) {
-    mUtfChars = (s != NULL) ? env->GetStringUTFChars(s, NULL) : NULL;
+    mUtfChars = (s != nullptr) ? env->GetStringUTFChars(s, nullptr) : nullptr;
   }
 
   ~NullableScopedUtfChars() {
@@ -149,9 +149,10 @@
   void operator=(const NullableScopedUtfChars&);
 };
 
-static jobject DexFile_openDexFileNative(JNIEnv* env, jclass, jstring javaSourceName, jstring javaOutputName, jint) {
+static jobject DexFile_openDexFileNative(
+    JNIEnv* env, jclass, jstring javaSourceName, jstring javaOutputName, jint) {
   ScopedUtfChars sourceName(env, javaSourceName);
-  if (sourceName.c_str() == NULL) {
+  if (sourceName.c_str() == nullptr) {
     return 0;
   }
   NullableScopedUtfChars outputName(env, javaOutputName);
@@ -224,9 +225,9 @@
   }
 
   ScopedUtfChars class_name(env, javaName);
-  if (class_name.c_str() == NULL) {
+  if (class_name.c_str() == nullptr) {
     VLOG(class_linker) << "Failed to find class_name";
-    return NULL;
+    return nullptr;
   }
   const std::string descriptor(DotToDescriptor(class_name.c_str()));
   const size_t hash(ComputeModifiedUtf8Hash(descriptor.c_str()));
@@ -367,7 +368,7 @@
                          instruction_set.c_str(), defer);
 }
 
-// public API, NULL pkgname
+// public API, null pkgname
 static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename) {
   const char* instruction_set = GetInstructionSetString(kRuntimeISA);
   ScopedUtfChars filename(env, javaFilename);
@@ -378,11 +379,14 @@
 
 static JNINativeMethod gMethods[] = {
   NATIVE_METHOD(DexFile, closeDexFile, "(Ljava/lang/Object;)V"),
-  NATIVE_METHOD(DexFile, defineClassNative, "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/Object;)Ljava/lang/Class;"),
+  NATIVE_METHOD(DexFile, defineClassNative,
+                "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/Object;)Ljava/lang/Class;"),
   NATIVE_METHOD(DexFile, getClassNameList, "(Ljava/lang/Object;)[Ljava/lang/String;"),
   NATIVE_METHOD(DexFile, isDexOptNeeded, "(Ljava/lang/String;)Z"),
-  NATIVE_METHOD(DexFile, getDexOptNeeded, "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)I"),
-  NATIVE_METHOD(DexFile, openDexFileNative, "(Ljava/lang/String;Ljava/lang/String;I)Ljava/lang/Object;"),
+  NATIVE_METHOD(DexFile, getDexOptNeeded,
+                "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)I"),
+  NATIVE_METHOD(DexFile, openDexFileNative,
+                "(Ljava/lang/String;Ljava/lang/String;I)Ljava/lang/Object;"),
 };
 
 void register_dalvik_system_DexFile(JNIEnv* env) {
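
NullableScopedUtfChars above differs from the ScopedUtfChars it mirrors in exactly one way: a null jstring is a legal input that simply yields a null c_str() rather than an error. The RAII shape reduced to its essentials, with the JNI calls replaced by plain allocation (a sketch, not the real class):

    #include <cassert>
    #include <cstring>

    class NullableScopedChars {
     public:
      explicit NullableScopedChars(const char* source) : chars_(nullptr) {
        if (source != nullptr) {  // tolerate null instead of failing
          chars_ = new char[std::strlen(source) + 1];
          std::strcpy(chars_, source);
        }
      }
      ~NullableScopedChars() { delete[] chars_; }  // release only what was acquired

      const char* c_str() const { return chars_; }

     private:
      char* chars_;
      NullableScopedChars(const NullableScopedChars&) = delete;
      void operator=(const NullableScopedChars&) = delete;
    };

    int main() {
      NullableScopedChars present("classes.dex");
      NullableScopedChars absent(nullptr);
      assert(std::strcmp(present.c_str(), "classes.dex") == 0);
      assert(absent.c_str() == nullptr);  // null in, null out -- no error state
      return 0;
    }
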
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 876e29a..46881b0 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -104,7 +104,7 @@
   }
 
   ScopedUtfChars traceFilename(env, javaTraceFilename);
-  if (traceFilename.c_str() == NULL) {
+  if (traceFilename.c_str() == nullptr) {
     return;
   }
   Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, Trace::TraceOutputMode::kFile,
@@ -116,7 +116,7 @@
                                                jint bufferSize, jint flags,
                                                jboolean samplingEnabled, jint intervalUs) {
   ScopedUtfChars traceFilename(env, javaTraceFilename);
-  if (traceFilename.c_str() == NULL) {
+  if (traceFilename.c_str() == nullptr) {
     return;
   }
   Trace::Start(traceFilename.c_str(), -1, bufferSize, flags, Trace::TraceOutputMode::kFile,
@@ -156,7 +156,7 @@
 
 static void ThrowUnsupportedOperationException(JNIEnv* env) {
   ScopedObjectAccess soa(env);
-  soa.Self()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", NULL);
+  soa.Self()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", nullptr);
 }
 
 static void VMDebug_startInstructionCounting(JNIEnv* env, jclass) {
@@ -200,15 +200,15 @@
  * error occurs during file handling.
  */
 static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, jobject javaFd) {
-  // Only one of these may be NULL.
-  if (javaFilename == NULL && javaFd == NULL) {
+  // Only one of these may be null.
+  if (javaFilename == nullptr && javaFd == nullptr) {
     ScopedObjectAccess soa(env);
     ThrowNullPointerException("fileName == null && fd == null");
     return;
   }
 
   std::string filename;
-  if (javaFilename != NULL) {
+  if (javaFilename != nullptr) {
     ScopedUtfChars chars(env, javaFilename);
     if (env->ExceptionCheck()) {
       return;
@@ -219,7 +219,7 @@
   }
 
   int fd = -1;
-  if (javaFd != NULL) {
+  if (javaFd != nullptr) {
     fd = jniGetFDFromFileDescriptor(env, javaFd);
     if (fd < 0) {
       ScopedObjectAccess soa(env);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 196a231..53bb129 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -114,7 +114,7 @@
 }
 
 static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) {
-  if (javaArray == NULL) {  // Most likely allocation failed
+  if (javaArray == nullptr) {  // Most likely allocation failed
     return 0;
   }
   ScopedFastNativeObjectAccess soa(env);
@@ -263,17 +263,17 @@
 };
 
 // Based on ClassLinker::ResolveString.
-static void PreloadDexCachesResolveString(Handle<mirror::DexCache> dex_cache, uint32_t string_idx,
-                                          StringTable& strings)
+static void PreloadDexCachesResolveString(
+    Handle<mirror::DexCache> dex_cache, uint32_t string_idx, StringTable& strings)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::String* string = dex_cache->GetResolvedString(string_idx);
-  if (string != NULL) {
+  if (string != nullptr) {
     return;
   }
   const DexFile* dex_file = dex_cache->GetDexFile();
   const char* utf8 = dex_file->StringDataByIdx(string_idx);
   string = strings[utf8];
-  if (string == NULL) {
+  if (string == nullptr) {
     return;
   }
   // LOG(INFO) << "VMRuntime.preloadDexCaches resolved string=" << utf8;
@@ -281,10 +281,11 @@
 }
 
 // Based on ClassLinker::ResolveType.
-static void PreloadDexCachesResolveType(Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
+static void PreloadDexCachesResolveType(
+    Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
-  if (klass != NULL) {
+  if (klass != nullptr) {
     return;
   }
   const DexFile* dex_file = dex_cache->GetDexFile();
@@ -293,9 +294,9 @@
   if (class_name[1] == '\0') {
     klass = linker->FindPrimitiveClass(class_name[0]);
   } else {
-    klass = linker->LookupClass(self, class_name, ComputeModifiedUtf8Hash(class_name), NULL);
+    klass = linker->LookupClass(self, class_name, ComputeModifiedUtf8Hash(class_name), nullptr);
   }
-  if (klass == NULL) {
+  if (klass == nullptr) {
     return;
   }
   // LOG(INFO) << "VMRuntime.preloadDexCaches resolved klass=" << class_name;
@@ -321,7 +322,7 @@
   Thread* const self = Thread::Current();
   StackHandleScope<1> hs(self);
   Handle<mirror::Class> klass(hs.NewHandle(dex_cache->GetResolvedType(field_id.class_idx_)));
-  if (klass.Get() == NULL) {
+  if (klass.Get() == nullptr) {
     return;
   }
   if (is_static) {
@@ -329,7 +330,7 @@
   } else {
     field = klass->FindInstanceField(dex_cache.Get(), field_idx);
   }
-  if (field == NULL) {
+  if (field == nullptr) {
     return;
   }
   // LOG(INFO) << "VMRuntime.preloadDexCaches resolved field " << PrettyField(field);
@@ -341,13 +342,13 @@
                                           InvokeType invoke_type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtMethod* method = dex_cache->GetResolvedMethod(method_idx);
-  if (method != NULL) {
+  if (method != nullptr) {
     return;
   }
   const DexFile* dex_file = dex_cache->GetDexFile();
   const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
   mirror::Class* klass = dex_cache->GetResolvedType(method_id.class_idx_);
-  if (klass == NULL) {
+  if (klass == nullptr) {
     return;
   }
   switch (invoke_type) {
@@ -366,7 +367,7 @@
       LOG(FATAL) << "Unreachable - invocation type: " << invoke_type;
       UNREACHABLE();
   }
-  if (method == NULL) {
+  if (method == nullptr) {
     return;
   }
   // LOG(INFO) << "VMRuntime.preloadDexCaches resolved method " << PrettyMethod(method);
@@ -404,7 +405,7 @@
   const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
   for (size_t i = 0; i< boot_class_path.size(); i++) {
     const DexFile* dex_file = boot_class_path[i];
-    CHECK(dex_file != NULL);
+    CHECK(dex_file != nullptr);
     total->num_strings += dex_file->NumStringIds();
     total->num_fields += dex_file->NumFieldIds();
     total->num_methods += dex_file->NumMethodIds();
@@ -421,29 +422,29 @@
   const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
   for (size_t i = 0; i< boot_class_path.size(); i++) {
     const DexFile* dex_file = boot_class_path[i];
-    CHECK(dex_file != NULL);
+    CHECK(dex_file != nullptr);
     mirror::DexCache* dex_cache = linker->FindDexCache(*dex_file);
     for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
       mirror::String* string = dex_cache->GetResolvedString(j);
-      if (string != NULL) {
+      if (string != nullptr) {
         filled->num_strings++;
       }
     }
     for (size_t j = 0; j < dex_cache->NumResolvedTypes(); j++) {
       mirror::Class* klass = dex_cache->GetResolvedType(j);
-      if (klass != NULL) {
+      if (klass != nullptr) {
         filled->num_types++;
       }
     }
     for (size_t j = 0; j < dex_cache->NumResolvedFields(); j++) {
       ArtField* field = linker->GetResolvedField(j, dex_cache);
-      if (field != NULL) {
+      if (field != nullptr) {
         filled->num_fields++;
       }
     }
     for (size_t j = 0; j < dex_cache->NumResolvedMethods(); j++) {
       mirror::ArtMethod* method = dex_cache->GetResolvedMethod(j);
-      if (method != NULL) {
+      if (method != nullptr) {
         filled->num_methods++;
       }
     }
@@ -482,7 +483,7 @@
   const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
   for (size_t i = 0; i< boot_class_path.size(); i++) {
     const DexFile* dex_file = boot_class_path[i];
-    CHECK(dex_file != NULL);
+    CHECK(dex_file != nullptr);
     StackHandleScope<1> hs(soa.Self());
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file)));
 
@@ -504,7 +505,7 @@
            class_def_index++) {
         const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
         const uint8_t* class_data = dex_file->GetClassData(class_def);
-        if (class_data == NULL) {
+        if (class_data == nullptr) {
           continue;
         }
         ClassDataItemIterator it(*dex_file, class_data);
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 51a897d..b0d923b 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -42,7 +42,7 @@
     const ScopedFastNativeObjectAccess& soa, jobject java_class)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
-  DCHECK(c != NULL);
+  DCHECK(c != nullptr);
   DCHECK(c->IsClass());
   // TODO: we could EnsureInitialized here, rather than on every reflective get/set or invoke .
   // For now, we conservatively preserve the old dalvik behavior. A quick "IsInitialized" check
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index 1198c2e..b9f8d01 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -31,14 +31,14 @@
   // Should only be called while holding the lock on the dex cache.
   DCHECK_EQ(dex_cache->GetLockOwnerThreadId(), soa.Self()->GetThreadId());
   const DexFile* dex_file = dex_cache->GetDexFile();
-  if (dex_file == NULL) {
-    return NULL;
+  if (dex_file == nullptr) {
+    return nullptr;
   }
   void* address = const_cast<void*>(reinterpret_cast<const void*>(dex_file->Begin()));
   jobject byte_buffer = env->NewDirectByteBuffer(address, dex_file->Size());
-  if (byte_buffer == NULL) {
+  if (byte_buffer == nullptr) {
     DCHECK(soa.Self()->IsExceptionPending());
-    return NULL;
+    return nullptr;
   }
 
   jvalue args[1];
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index 6afe83b..2d153d4 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -28,7 +28,7 @@
 
 static jint String_compareTo(JNIEnv* env, jobject javaThis, jobject javaRhs) {
   ScopedFastNativeObjectAccess soa(env);
-  if (UNLIKELY(javaRhs == NULL)) {
+  if (UNLIKELY(javaRhs == nullptr)) {
     ThrowNullPointerException("rhs == null");
     return -1;
   } else {
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index d3b52ba..be7022e 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -43,7 +43,7 @@
   ScopedFastNativeObjectAccess soa(env);
   MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
   Thread* thread = Thread::FromManagedThread(soa, java_thread);
-  return (thread != NULL) ? thread->IsInterrupted() : JNI_FALSE;
+  return (thread != nullptr) ? thread->IsInterrupted() : JNI_FALSE;
 }
 
 static void Thread_nativeCreate(JNIEnv* env, jclass, jobject java_thread, jlong stack_size,
@@ -64,7 +64,7 @@
   ThreadState internal_thread_state = (has_been_started ? kTerminated : kStarting);
   MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
   Thread* thread = Thread::FromManagedThread(soa, java_thread);
-  if (thread != NULL) {
+  if (thread != nullptr) {
     internal_thread_state = thread->GetState();
   }
   switch (internal_thread_state) {
@@ -99,7 +99,7 @@
 static jboolean Thread_nativeHoldsLock(JNIEnv* env, jobject java_thread, jobject java_object) {
   ScopedObjectAccess soa(env);
   mirror::Object* object = soa.Decode<mirror::Object*>(java_object);
-  if (object == NULL) {
+  if (object == nullptr) {
     ThrowNullPointerException("object == null");
     return JNI_FALSE;
   }
@@ -112,7 +112,7 @@
   ScopedFastNativeObjectAccess soa(env);
   MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
   Thread* thread = Thread::FromManagedThread(soa, java_thread);
-  if (thread != NULL) {
+  if (thread != nullptr) {
     thread->Interrupt(soa.Self());
   }
 }
@@ -133,7 +133,7 @@
   bool timed_out;
   // Take suspend thread lock to avoid races with threads trying to suspend this one.
   Thread* thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
-  if (thread != NULL) {
+  if (thread != nullptr) {
     {
       ScopedObjectAccess soa(env);
       thread->SetThreadName(name.c_str());
@@ -154,7 +154,7 @@
   ScopedObjectAccess soa(env);
   MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
   Thread* thread = Thread::FromManagedThread(soa, java_thread);
-  if (thread != NULL) {
+  if (thread != nullptr) {
     thread->SetNativePriority(new_priority);
   }
 }
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index eddd7de..beb953b 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -27,13 +27,14 @@
 
 namespace art {
 
-static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementClass, jobject javaDimArray) {
+static jobject Array_createMultiArray(
+    JNIEnv* env, jclass, jclass javaElementClass, jobject javaDimArray) {
   ScopedFastNativeObjectAccess soa(env);
-  DCHECK(javaElementClass != NULL);
+  DCHECK(javaElementClass != nullptr);
   StackHandleScope<2> hs(soa.Self());
   Handle<mirror::Class> element_class(hs.NewHandle(soa.Decode<mirror::Class*>(javaElementClass)));
   DCHECK(element_class->IsClass());
-  DCHECK(javaDimArray != NULL);
+  DCHECK(javaDimArray != nullptr);
   mirror::Object* dimensions_obj = soa.Decode<mirror::Object*>(javaDimArray);
   DCHECK(dimensions_obj->IsArrayInstance());
   DCHECK_EQ(dimensions_obj->GetClass()->GetComponentType()->GetPrimitiveType(),
@@ -47,18 +48,18 @@
 
 static jobject Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementClass, jint length) {
   ScopedFastNativeObjectAccess soa(env);
-  DCHECK(javaElementClass != NULL);
+  DCHECK(javaElementClass != nullptr);
   if (UNLIKELY(length < 0)) {
     ThrowNegativeArraySizeException(length);
-    return NULL;
+    return nullptr;
   }
   mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
   Runtime* runtime = Runtime::Current();
   ClassLinker* class_linker = runtime->GetClassLinker();
   mirror::Class* array_class = class_linker->FindArrayClass(soa.Self(), &element_class);
-  if (UNLIKELY(array_class == NULL)) {
+  if (UNLIKELY(array_class == nullptr)) {
     CHECK(soa.Self()->IsExceptionPending());
-    return NULL;
+    return nullptr;
   }
   DCHECK(array_class->IsObjectArrayClass());
   mirror::Array* new_array = mirror::ObjectArray<mirror::Object*>::Alloc(
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 987427e..b96ddc8 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -43,7 +43,7 @@
 
 /*
  * Get a stack trace as an array of StackTraceElement objects.  Returns
- * NULL on failure, e.g. if the threadId couldn't be found.
+ * null on failure, e.g. if the threadId couldn't be found.
  */
 static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint thin_lock_id) {
   jobjectArray trace = nullptr;
@@ -145,7 +145,7 @@
   }
 
   jbyteArray result = env->NewByteArray(bytes.size());
-  if (result != NULL) {
+  if (result != nullptr) {
     env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
   }
   return result;
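
The byte-array hunk above relies on NewByteArray signalling failure with a null return; SetByteArrayRegion is only reached on success, so no separate exception check is needed. A self-contained sketch of that copy-out idiom (ToJavaByteArray is an illustrative name):

    #include <vector>
    #include <cstdint>
    #include <jni.h>

    // Copy a native byte vector into a fresh Java byte[]; on allocation
    // failure the null result is returned with the JNI exception pending.
    static jbyteArray ToJavaByteArray(JNIEnv* env, const std::vector<uint8_t>& bytes) {
      jbyteArray result = env->NewByteArray(static_cast<jsize>(bytes.size()));
      if (result != nullptr) {
        env->SetByteArrayRegion(result, 0, static_cast<jsize>(bytes.size()),
                                reinterpret_cast<const jbyte*>(bytes.data()));
      }
      return result;
    }
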
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index a851f21..632ccde 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -27,20 +27,20 @@
 // Walks up the stack 'n' callers, when used with Thread::WalkStack.
 struct NthCallerVisitor : public StackVisitor {
   NthCallerVisitor(Thread* thread, size_t n_in, bool include_runtime_and_upcalls = false)
-      : StackVisitor(thread, NULL), n(n_in),
-        include_runtime_and_upcalls_(include_runtime_and_upcalls), count(0), caller(NULL) {}
+      : StackVisitor(thread, nullptr), n(n_in),
+        include_runtime_and_upcalls_(include_runtime_and_upcalls), count(0), caller(nullptr) {}
 
   bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     mirror::ArtMethod* m = GetMethod();
     bool do_count = false;
-    if (m == NULL || m->IsRuntimeMethod()) {
+    if (m == nullptr || m->IsRuntimeMethod()) {
       // Upcall.
       do_count = include_runtime_and_upcalls_;
     } else {
       do_count = true;
     }
     if (do_count) {
-      DCHECK(caller == NULL);
+      DCHECK(caller == nullptr);
       if (count == n) {
         caller = m;
         return false;
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index eddbd8a..b0cbd0e 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -497,7 +497,7 @@
     MutexLock mu(Thread::Current(), secondary_lookup_lock_);
     auto secondary_lb = secondary_oat_dex_files_.lower_bound(key);
     if (secondary_lb != secondary_oat_dex_files_.end() && key == secondary_lb->first) {
-      oat_dex_file = secondary_lb->second;  // May be nullptr.
+      oat_dex_file = secondary_lb->second;  // May be null.
     } else {
       // We haven't seen this dex_location before, we must check the canonical location.
       std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
@@ -506,8 +506,8 @@
         auto canonical_it = oat_dex_files_.find(canonical_key);
         if (canonical_it != oat_dex_files_.end()) {
           oat_dex_file = canonical_it->second;
-        }  // else keep nullptr.
-      }  // else keep nullptr.
+        }  // else keep null.
+      }  // else keep null.
 
       // Copy the key to the string_cache_ and store the result in secondary map.
       string_cache_.emplace_back(key.data(), key.length());
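
The comment rewrites above ("keep null") annotate a lower_bound-based lookup: lower_bound plus an equality test answers "is this exact key present?" while also yielding the position where a negative result can later be cached. The idiom, stripped of the oat-specific types (table and key are stand-ins):

    #include <map>
    #include <string>

    // lower_bound-then-compare lookup, as in OatFile::GetOatDexFile's
    // secondary map; returns null when the key has not been seen.
    const int* Lookup(const std::map<std::string, int>& table, const std::string& key) {
      auto lb = table.lower_bound(key);
      if (lb != table.end() && lb->first == key) {
        return &lb->second;  // Exact hit (which may itself be a cached negative).
      }
      return nullptr;        // Unknown key; the caller falls back to the slow path.
    }
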
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 42c60dc..b32dd22 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -48,7 +48,7 @@
   static OatFile* OpenWithElfFile(ElfFile* elf_file, const std::string& location,
                                   const char* abs_dex_location,
                                   std::string* error_msg);
-  // Open an oat file. Returns NULL on failure.  Requested base can
+  // Open an oat file. Returns null on failure.  Requested base can
   // optionally be used to request where the file should be loaded.
   // See the ResolveRelativeEncodedDexLocation for a description of how the
   // abs_dex_location argument is used.
@@ -149,7 +149,7 @@
     template<class T>
     T GetOatPointer(uint32_t offset) const {
       if (offset == 0) {
-        return NULL;
+        return nullptr;
       }
       return reinterpret_cast<T>(begin_ + offset);
     }
@@ -177,7 +177,7 @@
     const OatMethod GetOatMethod(uint32_t method_index) const;
 
     // Return a pointer to the OatMethodOffsets for the requested
-    // method_index, or nullptr if none is present. Note that most
+    // method_index, or null if none is present. Note that most
     // callers should use GetOatMethod.
     const OatMethodOffsets* GetOatMethodOffsets(uint32_t method_index) const;
 
@@ -238,7 +238,7 @@
 
   // Returns the absolute dex location for the encoded relative dex location.
   //
-  // If not nullptr, abs_dex_location is used to resolve the absolute dex
+  // If not null, abs_dex_location is used to resolve the absolute dex
   // location of relative dex locations encoded in the oat file.
   // For example, given absolute location "/data/app/foo/base.apk", encoded
   // dex locations "base.apk", "base.apk:classes2.dex", etc. would be resolved
@@ -300,10 +300,10 @@
   // Pointer to end of oat region for bounds checking.
   const uint8_t* end_;
 
-  // Pointer to the .bss section, if present, otherwise nullptr.
+  // Pointer to the .bss section, if present, otherwise null.
   const uint8_t* bss_begin_;
 
-  // Pointer to the end of the .bss section, if present, otherwise nullptr.
+  // Pointer to the end of the .bss section, if present, otherwise null.
   const uint8_t* bss_end_;
 
   // Was this oat_file loaded executable?
@@ -331,7 +331,7 @@
   // Map each location and canonical location (if different) retrieved from the
   // oat file to its OatDexFile. This map doesn't change after it's constructed in Setup()
   // and therefore doesn't need any locking and provides the cheapest dex file lookup
-  // for GetOatDexFile() for a very frequent use case. Never contains a nullptr value.
+  // for GetOatDexFile() for a very frequent use case. Never contains a null value.
   Table oat_dex_files_;
 
   // Lock guarding all members needed for secondary lookup in GetOatDexFile().
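
GetOatPointer in the hunk above is where most of this file's null returns originate: a zero offset is the oat format's "not present" marker, and everything else is decoded relative to begin_. The same decoding as a stand-alone template:

    #include <cstdint>

    // Offset-to-pointer decoding as in OatFile::OatDexFile::GetOatPointer;
    // offset 0 is reserved to mean "no data", hence the null return.
    template <class T>
    T GetPointerAt(const uint8_t* begin, uint32_t offset) {
      if (offset == 0) {
        return nullptr;
      }
      return reinterpret_cast<T>(begin + offset);
    }
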
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index e5c27b2..37e85ab 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -233,7 +233,7 @@
   for (int i = 1; ; i++) {
     std::string secondary_dex_location = DexFile::GetMultiDexClassesDexName(i, dex_location);
     oat_dex_file = oat_file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
-    if (oat_dex_file == NULL) {
+    if (oat_dex_file == nullptr) {
       // There are no more secondary dex files to load.
       break;
     }
@@ -393,12 +393,12 @@
 
 bool OatFileAssistant::GivenOatFileIsOutOfDate(const OatFile& file) {
   // Verify the dex checksum.
-  // Note: GetOatDexFile will return NULL if the dex checksum doesn't match
+  // Note: GetOatDexFile will return null if the dex checksum doesn't match
   // what we provide, which verifies the primary dex checksum for us.
   const uint32_t* dex_checksum_pointer = GetRequiredDexChecksum();
   const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile(
       dex_location_, dex_checksum_pointer, false);
-  if (oat_dex_file == NULL) {
+  if (oat_dex_file == nullptr) {
     return true;
   }
 
@@ -408,7 +408,7 @@
       = DexFile::GetMultiDexClassesDexName(i, dex_location_);
     const OatFile::OatDexFile* secondary_oat_dex_file
       = file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
-    if (secondary_oat_dex_file == NULL) {
+    if (secondary_oat_dex_file == nullptr) {
       // There are no more secondary dex files to check.
       break;
     }
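
Both hunks in oat_file_assistant.cc walk secondary multidex entries the same way: probe classes2.dex, classes3.dex, ... in order and stop at the first null lookup. A hedged sketch of that walk, with LookupSecondary standing in for GetOatDexFile on GetMultiDexClassesDexName(i, location):

    #include <functional>

    // "Probe until null" multidex walk; index 1 corresponds to classes2.dex.
    void ForEachSecondaryDexFile(
        const std::function<const void*(int)>& LookupSecondary,
        const std::function<void(const void*)>& Visit) {
      for (int i = 1; ; i++) {
        const void* entry = LookupSecondary(i);
        if (entry == nullptr) {
          break;  // No more secondary dex files.
        }
        Visit(entry);
      }
    }
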
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 9e7c2ef..a25ee31 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -85,7 +85,7 @@
   // Constructs an OatFileAssistant object to assist the oat file
   // corresponding to the given dex location with the target instruction set.
   //
-  // The dex_location must not be NULL and should remain available and
+  // The dex_location must not be null and should remain available and
   // unchanged for the duration of the lifetime of the OatFileAssistant object.
   // Typically the dex_location is the absolute path to the original,
   // un-optimized dex file.
@@ -152,11 +152,11 @@
   // Returns true on success.
   //
   // If there is a failure, the value of error_msg will be set to a string
-  // describing why there was failure. error_msg must not be nullptr.
+  // describing why there was a failure. error_msg must not be null.
   bool MakeUpToDate(std::string* error_msg);
 
   // Returns an oat file that can be used for loading dex files.
-  // Returns nullptr if no suitable oat file was found.
+  // Returns null if no suitable oat file was found.
   //
   // After this call, no other methods of the OatFileAssistant should be
   // called, because access to the loaded oat file has been taken away from
@@ -244,7 +244,7 @@
   // This will fail if dex2oat is not enabled in the current runtime.
   //
   // If there is a failure, the value of error_msg will be set to a string
-  // describing why there was failure. error_msg must not be nullptr.
+  // describing why there was a failure. error_msg must not be null.
   bool RelocateOatFile(const std::string* input_file, std::string* error_msg);
 
   // Generate the oat file from the dex file.
@@ -254,7 +254,7 @@
   // This will fail if dex2oat is not enabled in the current runtime.
   //
   // If there is a failure, the value of error_msg will be set to a string
-  // describing why there was failure. error_msg must not be nullptr.
+  // describing why there was a failure. error_msg must not be null.
   bool GenerateOatFile(std::string* error_msg);
 
   // Executes dex2oat using the current runtime configuration overridden with
@@ -263,7 +263,7 @@
   // Returns true on success.
   //
   // If there is a failure, the value of error_msg will be set to a string
-  // describing why there was failure. error_msg must not be nullptr.
+  // describing why there was a failure. error_msg must not be null.
   //
   // TODO: The OatFileAssistant probably isn't the right place to have this
   // function.
@@ -310,12 +310,12 @@
 
   // Gets the dex checksum required for an up-to-date oat file.
   // Returns dex_checksum if a required checksum was located. Returns
-  // nullptr if the required checksum was not found.
+  // null if the required checksum was not found.
   // The caller shouldn't clean up or free the returned pointer.
   const uint32_t* GetRequiredDexChecksum();
 
   // Returns the loaded odex file.
-  // Loads the file if needed. Returns nullptr if the file failed to load.
+  // Loads the file if needed. Returns null if the file failed to load.
   // The caller shouldn't clean up or free the returned pointer.
   const OatFile* GetOdexFile();
 
@@ -324,7 +324,7 @@
   void ClearOdexFileCache();
 
   // Returns the loaded oat file.
-  // Loads the file if needed. Returns nullptr if the file failed to load.
+  // Loads the file if needed. Returns null if the file failed to load.
   // The caller shouldn't clean up or free the returned pointer.
   const OatFile* GetOatFile();
 
@@ -333,19 +333,19 @@
   void ClearOatFileCache();
 
   // Returns the loaded image info.
-  // Loads the image info if needed. Returns nullptr if the image info failed
+  // Loads the image info if needed. Returns null if the image info failed
   // to load.
   // The caller shouldn't clean up or free the returned pointer.
   const ImageInfo* GetImageInfo();
 
   // Returns the loaded profile.
-  // Loads the profile if needed. Returns nullptr if the profile failed
+  // Loads the profile if needed. Returns null if the profile failed
   // to load.
   // The caller shouldn't clean up or free the returned pointer.
   ProfileFile* GetProfile();
 
   // Returns the loaded old profile.
-  // Loads the old profile if needed. Returns nullptr if the old profile
+  // Loads the old profile if needed. Returns null if the old profile
   // failed to load.
   // The caller shouldn't clean up or free the returned pointer.
   ProfileFile* GetOldProfile();
@@ -357,7 +357,7 @@
   ScopedFlock flock_;
 
   // In a properly constructed OatFileAssistant object, dex_location_ should
-  // never be nullptr.
+  // never be null.
   const char* dex_location_ = nullptr;
 
   // In a properly constructed OatFileAssistant object, isa_ should be either
@@ -365,7 +365,7 @@
   const InstructionSet isa_ = kNone;
 
   // The package name, used solely to find the profile file.
-  // This may be nullptr in a properly constructed object. In this case,
+  // This may be null in a properly constructed object. In this case,
   // profile_load_attempted_ and old_profile_load_attempted_ will be true, and
   // profile_load_succeeded_ and old_profile_load_succeeded_ will be false.
   const char* package_name_ = nullptr;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 0c942d2..3f6b2d2 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -118,7 +118,7 @@
   std::string GetImageDirectory() {
     if (IsHost()) {
       const char* host_dir = getenv("ANDROID_HOST_OUT");
-      CHECK(host_dir != NULL);
+      CHECK(host_dir != nullptr);
       return std::string(host_dir) + "/framework";
     } else {
       return std::string("/data/art-test");
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index cf81cc5..8e99dbb 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -41,9 +41,10 @@
 typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg) WARN_UNUSED;
 
 typedef void (MarkHeapReferenceCallback)(mirror::HeapReference<mirror::Object>* ref, void* arg);
-typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Reference* ref, void* arg);
+typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Reference* ref,
+    void* arg);
 
-// A callback for testing if an object is marked, returns nullptr if not marked, otherwise the new
+// A callback for testing if an object is marked; returns null if not marked, otherwise the new
 // address of the object (if the object didn't move, returns the input object).
 typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg) WARN_UNUSED;
 
diff --git a/runtime/os_linux.cc b/runtime/os_linux.cc
index e4403d7..2282789 100644
--- a/runtime/os_linux.cc
+++ b/runtime/os_linux.cc
@@ -40,10 +40,10 @@
 }
 
 File* OS::OpenFileWithFlags(const char* name, int flags) {
-  CHECK(name != NULL);
+  CHECK(name != nullptr);
   std::unique_ptr<File> file(new File);
   if (!file->Open(name, flags, 0666)) {
-    return NULL;
+    return nullptr;
   }
   return file.release();
 }
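
OpenFileWithFlags above is a compact instance of the unique_ptr-then-release pattern: the smart pointer guarantees deletion on the failure path, and release() hands the raw pointer to the caller only once Open() succeeds. Generically (Resource is an illustrative stand-in for art::File):

    #include <memory>

    struct Resource {
      bool Open() { return true; }  // Stand-in; the real Open() can fail.
    };

    Resource* OpenResource() {
      std::unique_ptr<Resource> res(new Resource);
      if (!res->Open()) {
        return nullptr;      // unique_ptr deletes the half-opened Resource here.
      }
      return res.release();  // Success: caller takes ownership of the raw pointer.
    }
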
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 620a4bd..0bc834f 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -324,7 +324,7 @@
     } else if (option == "vfprintf") {
       const void* hook = options[i].second;
       if (hook == nullptr) {
-        Usage("vfprintf argument was NULL");
+        Usage("vfprintf argument was nullptr");
         return false;
       }
       int (*hook_vfprintf)(FILE *, const char*, va_list) =
@@ -337,7 +337,7 @@
     } else if (option == "exit") {
       const void* hook = options[i].second;
       if (hook == nullptr) {
-        Usage("exit argument was NULL");
+        Usage("exit argument was nullptr");
         return false;
       }
       void(*hook_exit)(jint) = reinterpret_cast<void(*)(jint)>(const_cast<void*>(hook));
@@ -348,7 +348,7 @@
     } else if (option == "abort") {
       const void* hook = options[i].second;
       if (hook == nullptr) {
-        Usage("abort was NULL\n");
+        Usage("abort was nullptr\n");
         return false;
       }
       void(*hook_abort)() = reinterpret_cast<void(*)()>(const_cast<void*>(hook));
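
All three hooks above arrive through a const void* option value and are cast back to their function-pointer types; converting an object pointer to a function pointer is only conditionally-supported C++ (though fine on POSIX platforms), which is one reason the casts stay confined to the option parser. The shape of the recovery cast, with hook standing in for options[i].second:

    #include <cstdio>
    #include <cstdarg>

    using VfprintfHook = int (*)(FILE*, const char*, va_list);

    // Recover the vfprintf hook from the type-erased option value; a null
    // value corresponds to the Usage("vfprintf argument was nullptr") path.
    VfprintfHook RecoverVfprintfHook(const void* hook) {
      if (hook == nullptr) {
        return nullptr;
      }
      return reinterpret_cast<VfprintfHook>(const_cast<void*>(hook));
    }
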
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index 658b656..a8575de 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -33,7 +33,6 @@
   void* test_vfprintf = reinterpret_cast<void*>(0xa);
   void* test_abort = reinterpret_cast<void*>(0xb);
   void* test_exit = reinterpret_cast<void*>(0xc);
-  void* null = reinterpret_cast<void*>(NULL);
 
   std::string lib_core(CommonRuntimeTest::GetLibCoreDexFileName());
 
@@ -42,27 +41,27 @@
   boot_class_path += lib_core;
 
   RuntimeOptions options;
-  options.push_back(std::make_pair(boot_class_path.c_str(), null));
-  options.push_back(std::make_pair("-classpath", null));
-  options.push_back(std::make_pair(lib_core.c_str(), null));
-  options.push_back(std::make_pair("-cp", null));
-  options.push_back(std::make_pair(lib_core.c_str(), null));
-  options.push_back(std::make_pair("-Ximage:boot_image", null));
-  options.push_back(std::make_pair("-Xcheck:jni", null));
-  options.push_back(std::make_pair("-Xms2048", null));
-  options.push_back(std::make_pair("-Xmx4k", null));
-  options.push_back(std::make_pair("-Xss1m", null));
-  options.push_back(std::make_pair("-XX:HeapTargetUtilization=0.75", null));
-  options.push_back(std::make_pair("-Dfoo=bar", null));
-  options.push_back(std::make_pair("-Dbaz=qux", null));
-  options.push_back(std::make_pair("-verbose:gc,class,jni", null));
+  options.push_back(std::make_pair(boot_class_path.c_str(), nullptr));
+  options.push_back(std::make_pair("-classpath", nullptr));
+  options.push_back(std::make_pair(lib_core.c_str(), nullptr));
+  options.push_back(std::make_pair("-cp", nullptr));
+  options.push_back(std::make_pair(lib_core.c_str(), nullptr));
+  options.push_back(std::make_pair("-Ximage:boot_image", nullptr));
+  options.push_back(std::make_pair("-Xcheck:jni", nullptr));
+  options.push_back(std::make_pair("-Xms2048", nullptr));
+  options.push_back(std::make_pair("-Xmx4k", nullptr));
+  options.push_back(std::make_pair("-Xss1m", nullptr));
+  options.push_back(std::make_pair("-XX:HeapTargetUtilization=0.75", nullptr));
+  options.push_back(std::make_pair("-Dfoo=bar", nullptr));
+  options.push_back(std::make_pair("-Dbaz=qux", nullptr));
+  options.push_back(std::make_pair("-verbose:gc,class,jni", nullptr));
   options.push_back(std::make_pair("vfprintf", test_vfprintf));
   options.push_back(std::make_pair("abort", test_abort));
   options.push_back(std::make_pair("exit", test_exit));
 
   RuntimeArgumentMap map;
   std::unique_ptr<ParsedOptions> parsed(ParsedOptions::Create(options, false, &map));
-  ASSERT_TRUE(parsed.get() != NULL);
+  ASSERT_TRUE(parsed.get() != nullptr);
   ASSERT_NE(0u, map.Size());
 
   using Opt = RuntimeArgumentMap;
@@ -104,7 +103,7 @@
 
   RuntimeArgumentMap map;
   std::unique_ptr<ParsedOptions> parsed(ParsedOptions::Create(options, false, &map));
-  ASSERT_TRUE(parsed.get() != NULL);
+  ASSERT_TRUE(parsed.get() != nullptr);
   ASSERT_NE(0u, map.Size());
 
   using Opt = RuntimeArgumentMap;
diff --git a/runtime/primitive.h b/runtime/primitive.h
index 3818487..0ac5f40 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -132,7 +132,7 @@
         return "V";
       default:
         LOG(FATAL) << "Primitive char conversion on invalid type " << static_cast<int>(type);
-        return NULL;
+        return nullptr;
     }
   }
 
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index db372c3..90a47b3 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -58,7 +58,7 @@
   BoundedStackVisitor(std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack,
       Thread* thread, uint32_t max_depth)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : StackVisitor(thread, NULL), stack_(stack), max_depth_(max_depth), depth_(0) {
+      : StackVisitor(thread, nullptr), stack_(stack), max_depth_(max_depth), depth_(0) {
   }
 
   bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -399,7 +399,7 @@
 bool BackgroundMethodSamplingProfiler::ProcessMethod(mirror::ArtMethod* method) {
   if (method == nullptr) {
     profile_table_.NullMethod();
-    // Don't record a nullptr method.
+    // Don't record a null method.
     return false;
   }
 
@@ -820,7 +820,7 @@
     // Bad summary info.  It should be total/null/boot.
     return false;
   }
-  // This is the number of hits in all profiled methods (without nullptr or boot methods)
+  // This is the number of hits in all profiled methods (without null or boot methods)
   uint32_t total_count = strtoul(summary_info[0].c_str(), nullptr, 10);
 
   // Now read each line until the end of file.  Each line consists of 3 fields separated by '/'.
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index beba64f..a31d8ac 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -40,7 +40,7 @@
 }
 
 void ReferenceTable::Add(mirror::Object* obj) {
-  DCHECK(obj != NULL);
+  DCHECK(obj != nullptr);
   VerifyObject(obj);
   if (entries_.size() >= max_size_) {
     LOG(FATAL) << "ReferenceTable '" << name_ << "' "
@@ -79,8 +79,8 @@
 static void DumpSummaryLine(std::ostream& os, mirror::Object* obj, size_t element_count,
                             int identical, int equiv)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  if (obj == NULL) {
-    os << "    NULL reference (count=" << equiv << ")\n";
+  if (obj == nullptr) {
+    os << "    null reference (count=" << equiv << ")\n";
     return;
   }
   if (Runtime::Current()->IsClearedJniWeakGlobal(obj)) {
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index db98e1f..4ffebf2 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -40,8 +40,8 @@
     EXPECT_EQ(0U, rt.Size());
   }
 
-  // Check removal of all NULLs in a empty table is a no-op.
-  rt.Remove(NULL);
+  // Check removal of all nulls in an empty table is a no-op.
+  rt.Remove(nullptr);
   EXPECT_EQ(0U, rt.Size());
 
   // Check removal of all o1 in an empty table is a no-op.
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 7aefdaa..a62bc5e 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -37,35 +37,35 @@
     // Turn on -verbose:jni for the JNI tests.
     // gLogVerbosity.jni = true;
 
-    vm_->AttachCurrentThread(&env_, NULL);
+    vm_->AttachCurrentThread(&env_, nullptr);
 
     ScopedLocalRef<jclass> aioobe(env_,
                                   env_->FindClass("java/lang/ArrayIndexOutOfBoundsException"));
-    CHECK(aioobe.get() != NULL);
+    CHECK(aioobe.get() != nullptr);
     aioobe_ = reinterpret_cast<jclass>(env_->NewGlobalRef(aioobe.get()));
 
     ScopedLocalRef<jclass> ase(env_, env_->FindClass("java/lang/ArrayStoreException"));
-    CHECK(ase.get() != NULL);
+    CHECK(ase.get() != nullptr);
     ase_ = reinterpret_cast<jclass>(env_->NewGlobalRef(ase.get()));
 
     ScopedLocalRef<jclass> sioobe(env_,
                                   env_->FindClass("java/lang/StringIndexOutOfBoundsException"));
-    CHECK(sioobe.get() != NULL);
+    CHECK(sioobe.get() != nullptr);
     sioobe_ = reinterpret_cast<jclass>(env_->NewGlobalRef(sioobe.get()));
   }
 
   void CleanUpJniEnv() {
-    if (aioobe_ != NULL) {
+    if (aioobe_ != nullptr) {
       env_->DeleteGlobalRef(aioobe_);
-      aioobe_ = NULL;
+      aioobe_ = nullptr;
     }
-    if (ase_ != NULL) {
+    if (ase_ != nullptr) {
       env_->DeleteGlobalRef(ase_);
-      ase_ = NULL;
+      ase_ = nullptr;
     }
-    if (sioobe_ != NULL) {
+    if (sioobe_ != nullptr) {
       env_->DeleteGlobalRef(sioobe_);
-      sioobe_ = NULL;
+      sioobe_ = nullptr;
     }
   }
 
@@ -105,7 +105,7 @@
 
     mirror::Class* c = class_linker_->FindClass(self, DotToDescriptor(class_name).c_str(),
                                                 class_loader);
-    CHECK(c != NULL);
+    CHECK(c != nullptr);
 
     *method = is_static ? c->FindDirectMethod(method_name, method_signature)
                         : c->FindVirtualMethod(method_name, method_signature);
@@ -501,10 +501,10 @@
   CompileDirectMethod(class_loader, "Main", "main", "([Ljava/lang/String;)V");
 
   mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader);
-  ASSERT_TRUE(klass != NULL);
+  ASSERT_TRUE(klass != nullptr);
 
   mirror::ArtMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V");
-  ASSERT_TRUE(method != NULL);
+  ASSERT_TRUE(method != nullptr);
 
   // Start runtime.
   bool started = runtime_->Start();
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 2fc8d20..48bca62 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -289,7 +289,7 @@
     }
     gAborting++;
     os << "Runtime aborting...\n";
-    if (Runtime::Current() == NULL) {
+    if (Runtime::Current() == nullptr) {
       os << "(Runtime does not yet exist!)\n";
       return;
     }
@@ -350,7 +350,7 @@
   MutexLock mu(Thread::Current(), *Locks::abort_lock_);
 
   // Get any pending output out of the way.
-  fflush(NULL);
+  fflush(nullptr);
 
   // Many people have difficulty distinguishing aborts from crashes,
   // so be explicit.
@@ -358,7 +358,7 @@
   LOG(INTERNAL_FATAL) << Dumpable<AbortState>(state);
 
   // Call the abort hook if we have one.
-  if (Runtime::Current() != NULL && Runtime::Current()->abort_ != NULL) {
+  if (Runtime::Current() != nullptr && Runtime::Current()->abort_ != nullptr) {
     LOG(INTERNAL_FATAL) << "Calling abort hook...";
     Runtime::Current()->abort_();
     // notreached
@@ -386,7 +386,7 @@
 }
 
 void Runtime::CallExitHook(jint status) {
-  if (exit_ != NULL) {
+  if (exit_ != nullptr) {
     ScopedThreadStateChange tsc(Thread::Current(), kNative);
     exit_(status);
     LOG(WARNING) << "Exit hook returned instead of exiting!";
@@ -401,16 +401,16 @@
 
 bool Runtime::Create(const RuntimeOptions& options, bool ignore_unrecognized) {
   // TODO: acquire a static mutex on Runtime to avoid racing.
-  if (Runtime::instance_ != NULL) {
+  if (Runtime::instance_ != nullptr) {
     return false;
   }
-  InitLogging(NULL);  // Calls Locks::Init() as a side effect.
+  InitLogging(nullptr);  // Calls Locks::Init() as a side effect.
   instance_ = new Runtime;
   if (!instance_->Init(options, ignore_unrecognized)) {
     // TODO: Currently deleting the instance will abort the runtime on destruction. Now This will
     // leak memory, instead. Fix the destructor. b/19100793.
     // delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
     return false;
   }
   return true;
@@ -431,7 +431,7 @@
 
   mirror::ArtMethod* getSystemClassLoader =
       class_loader_class->FindDirectMethod("getSystemClassLoader", "()Ljava/lang/ClassLoader;");
-  CHECK(getSystemClassLoader != NULL);
+  CHECK(getSystemClassLoader != nullptr);
 
   JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
   JNIEnv* env = soa.Self()->GetJniEnv();
@@ -447,7 +447,7 @@
 
   ArtField* contextClassLoader =
       thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;");
-  CHECK(contextClassLoader != NULL);
+  CHECK(contextClassLoader != nullptr);
 
   // We can't run in a transaction yet.
   contextClassLoader->SetObject<false>(soa.Self()->GetPeer(),
@@ -590,7 +590,7 @@
 
   // Mark rootfs as being a slave so that changes from default
   // namespace only flow into our children.
-  if (mount("rootfs", "/", NULL, (MS_SLAVE | MS_REC), NULL) == -1) {
+  if (mount("rootfs", "/", nullptr, (MS_SLAVE | MS_REC), nullptr) == -1) {
     PLOG(WARNING) << "Failed to mount() rootfs as MS_SLAVE";
     return false;
   }
@@ -599,7 +599,7 @@
   // bind mount storage into their respective private namespaces, which
   // are isolated from each other.
   const char* target_base = getenv("EMULATED_STORAGE_TARGET");
-  if (target_base != NULL) {
+  if (target_base != nullptr) {
     if (mount("tmpfs", target_base, "tmpfs", MS_NOSUID | MS_NODEV,
               "uid=0,gid=1028,mode=0751") == -1) {
       LOG(WARNING) << "Failed to mount tmpfs to " << target_base;
@@ -677,7 +677,7 @@
 static bool OpenDexFilesFromImage(const std::string& image_location,
                                   std::vector<std::unique_ptr<const DexFile>>* dex_files,
                                   size_t* failures) {
-  DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is NULL";
+  DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is nullptr";
   std::string system_filename;
   bool has_system = false;
   std::string cache_filename_unused;
@@ -737,7 +737,7 @@
                            const std::vector<std::string>& dex_locations,
                            const std::string& image_location,
                            std::vector<std::unique_ptr<const DexFile>>* dex_files) {
-  DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is NULL";
+  DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
   size_t failure_count = 0;
   if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
     return failure_count;
@@ -870,7 +870,7 @@
     // If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
     // this case.
     // If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
-    // nullptr and we don't create the jit.
+    // null and we don't create the jit.
     use_jit = false;
   }
 
@@ -1129,26 +1129,26 @@
       env->NewGlobalRef(env->GetStaticObjectField(
           WellKnownClasses::java_lang_ThreadGroup,
           WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
-  CHECK(main_thread_group_ != NULL || IsAotCompiler());
+  CHECK(main_thread_group_ != nullptr || IsAotCompiler());
   system_thread_group_ =
       env->NewGlobalRef(env->GetStaticObjectField(
           WellKnownClasses::java_lang_ThreadGroup,
           WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
-  CHECK(system_thread_group_ != NULL || IsAotCompiler());
+  CHECK(system_thread_group_ != nullptr || IsAotCompiler());
 }
 
 jobject Runtime::GetMainThreadGroup() const {
-  CHECK(main_thread_group_ != NULL || IsAotCompiler());
+  CHECK(main_thread_group_ != nullptr || IsAotCompiler());
   return main_thread_group_;
 }
 
 jobject Runtime::GetSystemThreadGroup() const {
-  CHECK(system_thread_group_ != NULL || IsAotCompiler());
+  CHECK(system_thread_group_ != nullptr || IsAotCompiler());
   return system_thread_group_;
 }
 
 jobject Runtime::GetSystemClassLoader() const {
-  CHECK(system_class_loader_ != NULL || IsAotCompiler());
+  CHECK(system_class_loader_ != nullptr || IsAotCompiler());
   return system_class_loader_;
 }
 
@@ -1274,12 +1274,12 @@
 
 bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                                   bool create_peer) {
-  return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != NULL;
+  return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != nullptr;
 }
 
 void Runtime::DetachCurrentThread() {
   Thread* self = Thread::Current();
-  if (self == NULL) {
+  if (self == nullptr) {
     LOG(FATAL) << "attempting to detach thread that is not attached";
   }
   if (self->HasManagedStack()) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index d95640d..c35f4ca 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -248,7 +248,7 @@
   }
 
   InternTable* GetInternTable() const {
-    DCHECK(intern_table_ != NULL);
+    DCHECK(intern_table_ != nullptr);
     return intern_table_;
   }
 
@@ -328,7 +328,7 @@
   void VisitNonConcurrentRoots(RootVisitor* visitor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Sweep system weaks, the system weak is deleted if the visitor return nullptr. Otherwise, the
+  // Sweep system weaks: a system weak is deleted if the visitor returns null. Otherwise, the
   // system weak is updated to be the visitor's returned value.
   void SweepSystemWeaks(IsMarkedCallback* visitor, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -574,7 +574,7 @@
   void StartDaemonThreads();
   void StartSignalCatcher();
 
-  // A pointer to the active runtime or NULL.
+  // A pointer to the active runtime or null.
   static Runtime* instance_;
 
   // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 35d944f..d65e18e 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -321,7 +321,7 @@
 
   OsInfo os_info;
   const char* cmd_line = GetCmdLine();
-  if (cmd_line == NULL) {
+  if (cmd_line == nullptr) {
     cmd_line = "<unset>";  // Because no-one called InitLogging.
   }
   pid_t tid = GetTid();
@@ -353,9 +353,10 @@
       heap->DumpObject(LOG(INTERNAL_FATAL), reinterpret_cast<mirror::Object*>(info->si_addr));
     }
   }
-  if (getenv("debug_db_uid") != NULL || getenv("art_wait_for_gdb_on_crash") != NULL) {
+  if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) {
     LOG(INTERNAL_FATAL) << "********************************************************\n"
-                        << "* Process " << getpid() << " thread " << tid << " \"" << thread_name << "\""
+                        << "* Process " << getpid() << " thread " << tid << " \"" << thread_name
+                        << "\""
                         << " has been suspended while crashing.\n"
                         << "* Attach gdb:\n"
                         << "*     gdb -p " << tid << "\n"
@@ -370,7 +371,7 @@
   memset(&action, 0, sizeof(action));
   sigemptyset(&action.sa_mask);
   action.sa_handler = SIG_DFL;
-  sigaction(signal_number, &action, NULL);
+  sigaction(signal_number, &action, nullptr);
   // ...and re-raise so we die with the appropriate status.
   kill(getpid(), signal_number);
 #else
@@ -390,19 +391,19 @@
   action.sa_flags |= SA_ONSTACK;
 
   int rc = 0;
-  rc += sigaction(SIGABRT, &action, NULL);
-  rc += sigaction(SIGBUS, &action, NULL);
-  rc += sigaction(SIGFPE, &action, NULL);
-  rc += sigaction(SIGILL, &action, NULL);
-  rc += sigaction(SIGPIPE, &action, NULL);
-  rc += sigaction(SIGSEGV, &action, NULL);
+  rc += sigaction(SIGABRT, &action, nullptr);
+  rc += sigaction(SIGBUS, &action, nullptr);
+  rc += sigaction(SIGFPE, &action, nullptr);
+  rc += sigaction(SIGILL, &action, nullptr);
+  rc += sigaction(SIGPIPE, &action, nullptr);
+  rc += sigaction(SIGSEGV, &action, nullptr);
 #if defined(SIGSTKFLT)
-  rc += sigaction(SIGSTKFLT, &action, NULL);
+  rc += sigaction(SIGSTKFLT, &action, nullptr);
 #endif
-  rc += sigaction(SIGTRAP, &action, NULL);
+  rc += sigaction(SIGTRAP, &action, nullptr);
   // Special dump-all timeout.
   if (GetTimeoutSignal() != -1) {
-    rc += sigaction(GetTimeoutSignal(), &action, NULL);
+    rc += sigaction(GetTimeoutSignal(), &action, nullptr);
   }
   CHECK_EQ(rc, 0);
 }
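
Every sigaction call above passes nullptr as the third argument because the runtime never needs the previous disposition; a non-null pointer there would receive the old struct sigaction. Registration for a single signal, reduced to a sketch:

    #include <signal.h>
    #include <string.h>

    // One-signal version of the registration loop in runtime_linux.cc:
    // zeroed action, empty blocked-set, and no interest in the old action.
    bool InstallHandler(int signum, void (*handler)(int)) {
      struct sigaction action;
      memset(&action, 0, sizeof(action));
      sigemptyset(&action.sa_mask);
      action.sa_handler = handler;
      action.sa_flags |= SA_ONSTACK;  // Run on the alternate signal stack.
      return sigaction(signum, &action, nullptr) == 0;
    }
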
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 11b7df6..b93fcb4 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -36,11 +36,11 @@
   ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
       : self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) {
-    if (UNLIKELY(self_ == NULL)) {
-      // Value chosen arbitrarily and won't be used in the destructor since thread_ == NULL.
+    if (UNLIKELY(self_ == nullptr)) {
+      // Value chosen arbitrarily and won't be used in the destructor since thread_ == null.
       old_thread_state_ = kTerminated;
       Runtime* runtime = Runtime::Current();
-      CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown(self_));
+      CHECK(runtime == nullptr || !runtime->IsStarted() || runtime->IsShuttingDown(self_));
     } else {
       DCHECK_EQ(self, Thread::Current());
       // Read state without locks, ok as state is effectively thread local and we're not interested
@@ -60,10 +60,10 @@
   }
 
   ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
-    if (UNLIKELY(self_ == NULL)) {
+    if (UNLIKELY(self_ == nullptr)) {
       if (!expected_has_no_thread_) {
         Runtime* runtime = Runtime::Current();
-        bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown(nullptr);
+        bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDown(nullptr);
         CHECK(shutting_down);
       }
     } else {
@@ -87,7 +87,7 @@
  protected:
   // Constructor used by ScopedJniThreadState for an unattached thread that has access to the VM*.
   ScopedThreadStateChange()
-      : self_(NULL), thread_state_(kTerminated), old_thread_state_(kTerminated),
+      : self_(nullptr), thread_state_(kTerminated), old_thread_state_(kTerminated),
         expected_has_no_thread_(true) {}
 
   Thread* const self_;
@@ -124,7 +124,7 @@
    * Add a local reference for an object to the indirect reference table associated with the
    * current stack frame.  When the native function returns, the reference will be discarded.
    *
-   * We need to allow the same reference to be added multiple times, and cope with NULL.
+   * We need to allow the same reference to be added multiple times, and cope with null.
    *
    * This will be called on otherwise unreferenced objects. We cannot do GC allocations here, and
    * it's best if we don't grab a mutex.
@@ -133,8 +133,8 @@
   T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
-    if (obj == NULL) {
-      return NULL;
+    if (obj == nullptr) {
+      return nullptr;
     }
     DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
     return Env()->AddLocalReference<T>(obj);
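
ScopedThreadStateChange above is the runtime's standard RAII shape: flip the thread state in the constructor, restore it in the destructor, and treat a null self_ as the special shutdown/unattached case. Its skeleton, with State and slot as illustrative stand-ins for the ThreadState bookkeeping:

    enum class State { kRunnable, kNative, kTerminated };

    class ScopedStateChange {
     public:
      ScopedStateChange(State* slot, State new_state)
          : slot_(slot), old_state_(*slot) {
        *slot_ = new_state;   // Enter the new state for this scope.
      }
      ~ScopedStateChange() {
        *slot_ = old_state_;  // Restore on every exit path.
      }
     private:
      State* const slot_;
      const State old_state_;
    };
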
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index 26bf655..863d59b 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -53,7 +53,7 @@
 
     os << "Cmd line: " << current_cmd_line << "\n";
     const char* stashed_cmd_line = GetCmdLine();
-    if (stashed_cmd_line != NULL && current_cmd_line != stashed_cmd_line
+    if (stashed_cmd_line != nullptr && current_cmd_line != stashed_cmd_line
             && strcmp(stashed_cmd_line, "<unset>") != 0) {
       os << "Original command line: " << stashed_cmd_line << "\n";
     }
@@ -67,15 +67,15 @@
     : stack_trace_file_(stack_trace_file),
       lock_("SignalCatcher lock"),
       cond_("SignalCatcher::cond_", lock_),
-      thread_(NULL) {
+      thread_(nullptr) {
   SetHaltFlag(false);
 
   // Create a raw pthread; its start routine will attach to the runtime.
-  CHECK_PTHREAD_CALL(pthread_create, (&pthread_, NULL, &Run, this), "signal catcher thread");
+  CHECK_PTHREAD_CALL(pthread_create, (&pthread_, nullptr, &Run, this), "signal catcher thread");
 
   Thread* self = Thread::Current();
   MutexLock mu(self, lock_);
-  while (thread_ == NULL) {
+  while (thread_ == nullptr) {
     cond_.Wait(self);
   }
 }
@@ -85,7 +85,7 @@
   // to arrive, send it one.
   SetHaltFlag(true);
   CHECK_PTHREAD_CALL(pthread_kill, (pthread_, SIGQUIT), "signal catcher shutdown");
-  CHECK_PTHREAD_CALL(pthread_join, (pthread_, NULL), "signal catcher shutdown");
+  CHECK_PTHREAD_CALL(pthread_join, (pthread_, nullptr), "signal catcher shutdown");
 }
 
 void SignalCatcher::SetHaltFlag(bool new_value) {
@@ -176,7 +176,7 @@
 
 void* SignalCatcher::Run(void* arg) {
   SignalCatcher* signal_catcher = reinterpret_cast<SignalCatcher*>(arg);
-  CHECK(signal_catcher != NULL);
+  CHECK(signal_catcher != nullptr);
 
   Runtime* runtime = Runtime::Current();
   CHECK(runtime->AttachCurrentThread("Signal Catcher", true, runtime->GetSystemThreadGroup(),
@@ -199,7 +199,7 @@
     int signal_number = signal_catcher->WaitForSignal(self, signals);
     if (signal_catcher->ShouldHalt()) {
       runtime->DetachCurrentThread();
-      return NULL;
+      return nullptr;
     }
 
     switch (signal_number) {
diff --git a/runtime/signal_set.h b/runtime/signal_set.h
index 3b89e6e..c272514 100644
--- a/runtime/signal_set.h
+++ b/runtime/signal_set.h
@@ -38,7 +38,7 @@
   }
 
   void Block() {
-    if (sigprocmask(SIG_BLOCK, &set_, NULL) == -1) {
+    if (sigprocmask(SIG_BLOCK, &set_, nullptr) == -1) {
       PLOG(FATAL) << "sigprocmask failed";
     }
   }
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 4ae49dd..aa3e320 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -36,12 +36,12 @@
 mirror::Object* ShadowFrame::GetThisObject() const {
   mirror::ArtMethod* m = GetMethod();
   if (m->IsStatic()) {
-    return NULL;
+    return nullptr;
   } else if (m->IsNative()) {
     return GetVRegReference(0);
   } else {
     const DexFile::CodeItem* code_item = m->GetCodeItem();
-    CHECK(code_item != NULL) << PrettyMethod(m);
+    CHECK(code_item != nullptr) << PrettyMethod(m);
     uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
     return GetVRegReference(reg);
   }
@@ -50,7 +50,7 @@
 mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
   mirror::ArtMethod* m = GetMethod();
   if (m->IsStatic()) {
-    return NULL;
+    return nullptr;
   } else {
     return GetVRegReference(NumberOfVRegs() - num_ins);
   }
@@ -58,9 +58,9 @@
 
 size_t ManagedStack::NumJniShadowFrameReferences() const {
   size_t count = 0;
-  for (const ManagedStack* current_fragment = this; current_fragment != NULL;
+  for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
        current_fragment = current_fragment->GetLink()) {
-    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL;
+    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
          current_frame = current_frame->GetLink()) {
       if (current_frame->GetMethod()->IsNative()) {
         // The JNI ShadowFrame only contains references. (For indirect reference.)
@@ -72,9 +72,9 @@
 }
 
 bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const {
-  for (const ManagedStack* current_fragment = this; current_fragment != NULL;
+  for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
        current_fragment = current_fragment->GetLink()) {
-    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL;
+    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
          current_frame = current_frame->GetLink()) {
       if (current_frame->Contains(shadow_frame_entry)) {
         return true;
@@ -85,23 +85,23 @@
 }
 
 StackVisitor::StackVisitor(Thread* thread, Context* context)
-    : thread_(thread), cur_shadow_frame_(NULL),
-      cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(0), cur_depth_(0),
+    : thread_(thread), cur_shadow_frame_(nullptr),
+      cur_quick_frame_(nullptr), cur_quick_frame_pc_(0), num_frames_(0), cur_depth_(0),
       context_(context) {
   DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
 }
 
 StackVisitor::StackVisitor(Thread* thread, Context* context, size_t num_frames)
-    : thread_(thread), cur_shadow_frame_(NULL),
-      cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(num_frames), cur_depth_(0),
+    : thread_(thread), cur_shadow_frame_(nullptr),
+      cur_quick_frame_(nullptr), cur_quick_frame_pc_(0), num_frames_(num_frames), cur_depth_(0),
       context_(context) {
   DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
 }
 
 uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
-  if (cur_shadow_frame_ != NULL) {
+  if (cur_shadow_frame_ != nullptr) {
     return cur_shadow_frame_->GetDexPC();
-  } else if (cur_quick_frame_ != NULL) {
+  } else if (cur_quick_frame_ != nullptr) {
     return GetMethod()->ToDexPc(cur_quick_frame_pc_, abort_on_failure);
   } else {
     return 0;
@@ -183,7 +183,7 @@
     return GetRegisterIfAccessible(reg, kind, val);
   } else {
     const DexFile::CodeItem* code_item = m->GetCodeItem();
-    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be NULL or how would we compile
+    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                       // its instructions?
     *val = *GetVRegAddrFromQuickCode(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
                                      frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
@@ -199,7 +199,7 @@
   CodeInfo code_info = m->GetOptimizedCodeInfo();
   StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
   const DexFile::CodeItem* code_item = m->GetCodeItem();
-  DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be NULL or how would we compile
+  DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                     // its instructions?
   DCHECK_LT(vreg, code_item->registers_size_);
   uint16_t number_of_dex_registers = code_item->registers_size_;
@@ -297,7 +297,7 @@
     return GetRegisterPairIfAccessible(reg_lo, reg_hi, kind_lo, val);
   } else {
     const DexFile::CodeItem* code_item = m->GetCodeItem();
-    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be NULL or how would we compile
+    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                       // its instructions?
     uint32_t* addr = GetVRegAddrFromQuickCode(
         cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
@@ -372,7 +372,7 @@
     return SetRegisterIfAccessible(reg, new_value, kind);
   } else {
     const DexFile::CodeItem* code_item = m->GetCodeItem();
-    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be NULL or how would we compile
+    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                       // its instructions?
     uint32_t* addr = GetVRegAddrFromQuickCode(
         cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
@@ -390,7 +390,7 @@
   CodeInfo code_info = m->GetOptimizedCodeInfo();
   StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
   const DexFile::CodeItem* code_item = m->GetCodeItem();
-  DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be NULL or how would we compile
+  DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                     // its instructions?
   uint16_t number_of_dex_registers = code_item->registers_size_;
   DCHECK_LT(vreg, number_of_dex_registers);
@@ -488,8 +488,8 @@
   }
 }
 
-bool StackVisitor::SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
-                                            VRegKind kind_lo, VRegKind kind_hi) {
+bool StackVisitor::SetVRegPairFromQuickCode(
+    mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
   const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
   DCHECK(code_pointer != nullptr);
   const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
@@ -505,7 +505,7 @@
     return SetRegisterPairIfAccessible(reg_lo, reg_hi, new_value, is_float);
   } else {
     const DexFile::CodeItem* code_item = m->GetCodeItem();
-    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be NULL or how would we compile
+    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                       // its instructions?
     uint32_t* addr = GetVRegAddrFromQuickCode(
         cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
@@ -515,8 +515,8 @@
   }
 }
 
-bool StackVisitor::SetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
-                                                VRegKind kind_lo, VRegKind kind_hi) {
+bool StackVisitor::SetVRegPairFromOptimizedCode(
+    mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
   uint32_t low_32bits = Low32Bits(new_value);
   uint32_t high_32bits = High32Bits(new_value);
   bool success = SetVRegFromOptimizedCode(m, vreg, low_32bits, kind_lo);
@@ -585,14 +585,14 @@
 
 uintptr_t StackVisitor::GetReturnPc() const {
   uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
-  DCHECK(sp != NULL);
+  DCHECK(sp != nullptr);
   uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
   return *reinterpret_cast<uintptr_t*>(pc_addr);
 }
 
 void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
   uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
-  CHECK(sp != NULL);
+  CHECK(sp != nullptr);
   uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
   *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
 }
@@ -600,7 +600,7 @@
 size_t StackVisitor::ComputeNumFrames(Thread* thread) {
   struct NumFramesVisitor : public StackVisitor {
     explicit NumFramesVisitor(Thread* thread_in)
-        : StackVisitor(thread_in, NULL), frames(0) {}
+        : StackVisitor(thread_in, nullptr), frames(0) {}
 
     bool VisitFrame() OVERRIDE {
       frames++;
@@ -652,7 +652,7 @@
 void StackVisitor::DescribeStack(Thread* thread) {
   struct DescribeStackVisitor : public StackVisitor {
     explicit DescribeStackVisitor(Thread* thread_in)
-        : StackVisitor(thread_in, NULL) {}
+        : StackVisitor(thread_in, nullptr) {}
 
     bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
       LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
@@ -666,7 +666,7 @@
 std::string StackVisitor::DescribeLocation() const {
   std::string result("Visiting method '");
   mirror::ArtMethod* m = GetMethod();
-  if (m == NULL) {
+  if (m == nullptr) {
     return "upcall";
   }
   result += PrettyMethod(m);
@@ -713,24 +713,24 @@
   bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
   uint32_t instrumentation_stack_depth = 0;
 
-  for (const ManagedStack* current_fragment = thread_->GetManagedStack(); current_fragment != NULL;
-       current_fragment = current_fragment->GetLink()) {
+  for (const ManagedStack* current_fragment = thread_->GetManagedStack();
+       current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
     cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
     cur_quick_frame_ = current_fragment->GetTopQuickFrame();
     cur_quick_frame_pc_ = 0;
 
-    if (cur_quick_frame_ != NULL) {  // Handle quick stack frames.
+    if (cur_quick_frame_ != nullptr) {  // Handle quick stack frames.
       // Can't be both a shadow and a quick fragment.
-      DCHECK(current_fragment->GetTopShadowFrame() == NULL);
+      DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
       mirror::ArtMethod* method = cur_quick_frame_->AsMirrorPtr();
-      while (method != NULL) {
+      while (method != nullptr) {
         SanityCheckFrame();
         bool should_continue = VisitFrame();
         if (UNLIKELY(!should_continue)) {
           return;
         }
 
-        if (context_ != NULL) {
+        if (context_ != nullptr) {
           context_->FillCalleeSaves(*this);
         }
         size_t frame_size = method->GetFrameSizeInBytes();
@@ -748,7 +748,8 @@
             if (GetMethod() == Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAll)) {
               // Skip runtime save all callee frames which are used to deliver exceptions.
             } else if (instrumentation_frame.interpreter_entry_) {
-              mirror::ArtMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+              mirror::ArtMethod* callee =
+                  Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
               CHECK_EQ(GetMethod(), callee) << "Expected: " << PrettyMethod(callee) << " Found: "
                                             << PrettyMethod(GetMethod());
             } else if (instrumentation_frame.method_ != GetMethod()) {
@@ -771,7 +772,7 @@
         cur_depth_++;
         method = cur_quick_frame_->AsMirrorPtr();
       }
-    } else if (cur_shadow_frame_ != NULL) {
+    } else if (cur_shadow_frame_ != nullptr) {
       do {
         SanityCheckFrame();
         bool should_continue = VisitFrame();
@@ -780,7 +781,7 @@
         }
         cur_depth_++;
         cur_shadow_frame_ = cur_shadow_frame_->GetLink();
-      } while (cur_shadow_frame_ != NULL);
+      } while (cur_shadow_frame_ != nullptr);
     }
     if (include_transitions) {
       bool should_continue = VisitFrame();
diff --git a/runtime/stack.h b/runtime/stack.h
index fbb0aa4..ed9e458 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -295,11 +295,12 @@
   }
 
   StackReference<mirror::Object>* References() {
-    return const_cast<StackReference<mirror::Object>*>(const_cast<const ShadowFrame*>(this)->References());
+    return const_cast<StackReference<mirror::Object>*>(
+        const_cast<const ShadowFrame*>(this)->References());
   }
 
   const uint32_t number_of_vregs_;
-  // Link to previous shadow frame or NULL.
+  // Link to previous shadow frame or null.
   ShadowFrame* link_;
   mirror::ArtMethod* method_;
   uint32_t dex_pc_;
@@ -571,7 +572,8 @@
        * Special temporaries may have custom locations and the logic above deals with that.
        * However, non-special temporaries are placed relative to the outs.
        */
-      int temps_start = sizeof(StackReference<mirror::ArtMethod>) + code_item->outs_size_ * sizeof(uint32_t);
+      int temps_start = sizeof(StackReference<mirror::ArtMethod>) +
+          code_item->outs_size_ * sizeof(uint32_t);
       int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
       return temps_start + relative_offset;
     } else if (reg < num_regs) {
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 16add79..f7ef894 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -35,10 +35,10 @@
 }
 
 inline Thread* Thread::Current() {
-  // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
+  // We rely on Thread::Current returning null for a detached thread, so it's not obvious
   // that we can replace this with a direct %fs access on x86.
   if (!is_started_) {
-    return NULL;
+    return nullptr;
   } else {
     void* thread = pthread_getspecific(Thread::pthread_key_self_);
     return reinterpret_cast<Thread*>(thread);
@@ -92,7 +92,7 @@
         // We expect no locks except the mutator_lock_ or thread list suspend thread lock.
         if (i != kMutatorLock) {
           BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
-          if (held_mutex != NULL) {
+          if (held_mutex != nullptr) {
             LOG(ERROR) << "holding \"" << held_mutex->GetName()
                       << "\" at point where thread suspension is expected";
             bad_mutexes_held = true;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 5ca51fb..fa65bce 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -588,7 +588,8 @@
 
 mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
   ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
-  return (tlsPtr_.opeer != nullptr) ? reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
+  return (tlsPtr_.opeer != nullptr) ?
+      reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
 }
 
 void Thread::GetThreadName(std::string& name) const {
@@ -713,9 +714,8 @@
   union StateAndFlags new_state_and_flags;
   new_state_and_flags.as_int = old_state_and_flags.as_int;
   new_state_and_flags.as_struct.flags |= kCheckpointRequest;
-  bool success =
-      tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(old_state_and_flags.as_int,
-                                                                                       new_state_and_flags.as_int);
+  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
+      old_state_and_flags.as_int, new_state_and_flags.as_int);
   if (UNLIKELY(!success)) {
     // The thread changed state before the checkpoint was installed.
     CHECK_EQ(tlsPtr_.checkpoint_functions[available_checkpoint], function);
@@ -1005,8 +1005,8 @@
 
   // Threads with no managed stack frames should be shown.
   const ManagedStack* managed_stack = thread->GetManagedStack();
-  if (managed_stack == NULL || (managed_stack->GetTopQuickFrame() == NULL &&
-      managed_stack->GetTopShadowFrame() == NULL)) {
+  if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
+      managed_stack->GetTopShadowFrame() == nullptr)) {
     return true;
   }
 
@@ -1097,7 +1097,7 @@
   {
     // MutexLock to keep annotalysis happy.
     //
-    // Note we use nullptr for the thread because Thread::Current can
+    // Note we use null for the thread because Thread::Current can
     // return garbage since (is_started_ == true) and
     // Thread::pthread_key_self_ is not yet initialized.
     // This was seen on glibc.
@@ -1162,7 +1162,7 @@
 bool Thread::IsStillStarting() const {
   // You might think you can check whether the state is kStarting, but for much of thread startup,
   // the thread is in kNative; it might also be in kVmWait.
-  // You might think you can check whether the peer is nullptr, but the peer is actually created and
+  // You might think you can check whether the peer is null, but the peer is actually created and
   // assigned fairly early on, and needs to be.
   // It turns out that the last thing to change is the thread name; that's a good proxy for "has
   // this thread _ever_ entered kRunnable".
@@ -1424,7 +1424,7 @@
     DCHECK_EQ(kind, kWeakGlobal);
     result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
     if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
-      // This is a special case where it's okay to return nullptr.
+      // This is a special case where it's okay to return null.
       expect_null = true;
       result = nullptr;
     }
@@ -2197,7 +2197,7 @@
         const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
         CHECK(native_gc_map != nullptr) << PrettyMethod(m);
         const DexFile::CodeItem* code_item = m->GetCodeItem();
-        // Can't be nullptr or how would we compile its instructions?
+        // Can't be null or how would we compile its instructions?
         DCHECK(code_item != nullptr) << PrettyMethod(m);
         NativePcOffsetToReferenceMap map(native_gc_map);
         size_t num_regs = std::min(map.RegWidth() * 8,
diff --git a/runtime/thread.h b/runtime/thread.h
index 719668b..dd9e734 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -186,7 +186,7 @@
       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
+  // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
   // case we use 'tid' to identify the thread, and we'll include as much information as we can.
   static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
@@ -246,7 +246,7 @@
   // Once called thread suspension will cause an assertion failure.
   const char* StartAssertNoThreadSuspension(const char* cause) {
     if (kIsDebugBuild) {
-      CHECK(cause != NULL);
+      CHECK(cause != nullptr);
       const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
       tls32_.no_thread_suspension++;
       tlsPtr_.last_no_thread_suspension_cause = cause;
@@ -298,7 +298,7 @@
     return tls32_.tid;
   }
 
-  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
+  // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
   mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -341,7 +341,7 @@
 
   void SetException(mirror::Throwable* new_exception)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    CHECK(new_exception != NULL);
+    CHECK(new_exception != nullptr);
     // TODO: DCHECK(!IsExceptionPending());
     tlsPtr_.exception = new_exception;
   }
@@ -393,11 +393,11 @@
         (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
   }
 
-  // If 'msg' is NULL, no detail message is set.
+  // If 'msg' is null, no detail message is set.
   void ThrowNewException(const char* exception_class_descriptor, const char* msg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
+  // If 'msg' is null, no detail message is set. An exception must be pending, and will be
   // used as the new exception's cause.
   void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -489,8 +489,8 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
-  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
-  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
+  // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
+  // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
   // with the number of valid frames in the returned array.
   static jobjectArray InternalStackTraceToStackTraceElementArray(
       const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
@@ -1097,7 +1097,7 @@
     // The biased card table, see CardTable for details.
     uint8_t* card_table;
 
-    // The pending exception or NULL.
+    // The pending exception or null.
     mirror::Throwable* exception;
 
     // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
@@ -1133,13 +1133,13 @@
     // Pointer to previous stack trace captured by sampling profiler.
     std::vector<mirror::ArtMethod*>* stack_trace_sample;
 
-    // The next thread in the wait set this thread is part of or NULL if not waiting.
+    // The next thread in the wait set this thread is part of or null if not waiting.
     Thread* wait_next;
 
     // If we're blocked in MonitorEnter, this is the object we're trying to lock.
     mirror::Object* monitor_enter_object;
 
-    // Top of linked list of handle scopes or nullptr for none.
+    // Top of linked list of handle scopes or null for none.
     HandleScope* top_handle_scope;
 
     // Needed to get the right ClassLoader in JNI_OnLoad, but also
@@ -1174,7 +1174,7 @@
     // If no_thread_suspension_ is > 0, what is causing that assertion.
     const char* last_no_thread_suspension_cause;
 
-    // Pending checkpoint function or NULL if non-pending. Installation guarding by
+    // Pending checkpoint function or null if non-pending. Installation guarded by
     // Locks::thread_suspend_count_lock_.
     Closure* checkpoint_functions[kMaxCheckpoints];
 
@@ -1215,7 +1215,7 @@
 
   // Condition variable waited upon during a wait.
   ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
-  // Pointer to the monitor lock we're currently waiting on or NULL if not waiting.
+  // Pointer to the monitor lock we're currently waiting on or null if not waiting.
   Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
 
   // Thread "interrupted" status; stays raised until queried or thrown.
diff --git a/runtime/thread_linux.cc b/runtime/thread_linux.cc
index 0284364..0526f49 100644
--- a/runtime/thread_linux.cc
+++ b/runtime/thread_linux.cc
@@ -50,26 +50,26 @@
   ss.ss_sp = new uint8_t[kHostAltSigStackSize];
   ss.ss_size = kHostAltSigStackSize;
   ss.ss_flags = 0;
-  CHECK(ss.ss_sp != NULL);
-  SigAltStack(&ss, NULL);
+  CHECK(ss.ss_sp != nullptr);
+  SigAltStack(&ss, nullptr);
 
   // Double-check that it worked.
-  ss.ss_sp = NULL;
-  SigAltStack(NULL, &ss);
+  ss.ss_sp = nullptr;
+  SigAltStack(nullptr, &ss);
   VLOG(threads) << "Alternate signal stack is " << PrettySize(ss.ss_size) << " at " << ss.ss_sp;
 }
 
 void Thread::TearDownAlternateSignalStack() {
   // Get the pointer so we can free the memory.
   stack_t ss;
-  SigAltStack(NULL, &ss);
+  SigAltStack(nullptr, &ss);
   uint8_t* allocated_signal_stack = reinterpret_cast<uint8_t*>(ss.ss_sp);
 
   // Tell the kernel to stop using it.
-  ss.ss_sp = NULL;
+  ss.ss_sp = nullptr;
   ss.ss_flags = SS_DISABLE;
   ss.ss_size = kHostAltSigStackSize;  // Avoid ENOMEM failure with Mac OS' buggy libc.
-  SigAltStack(&ss, NULL);
+  SigAltStack(&ss, nullptr);
 
   // Free it.
   delete[] allocated_signal_stack;
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 560bcc1..cc54bbd 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -116,9 +116,9 @@
 }
 
 static void DumpUnattachedThread(std::ostream& os, pid_t tid) NO_THREAD_SAFETY_ANALYSIS {
-  // TODO: No thread safety analysis as DumpState with a NULL thread won't access fields, should
+  // TODO: No thread safety analysis as DumpState with a null thread won't access fields, should
   // refactor DumpState to avoid skipping analysis.
-  Thread::DumpState(os, NULL, tid);
+  Thread::DumpState(os, nullptr, tid);
   DumpKernelStack(os, tid, "  kernel: ", false);
   // TODO: Reenable this when the native code in system_server can handle it.
   // Currently "adb shell kill -3 `pid system_server`" will cause it to exit.
@@ -136,7 +136,7 @@
 
   Thread* self = Thread::Current();
   dirent* e;
-  while ((e = readdir(d)) != NULL) {
+  while ((e = readdir(d)) != nullptr) {
     char* end;
     pid_t tid = strtol(e->d_name, &end, 10);
     if (!*end) {
@@ -602,7 +602,7 @@
       scoped_name_string(env, (jstring)env->GetObjectField(peer,
                                                           WellKnownClasses::java_lang_Thread_name));
   ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
-  if (scoped_name_chars.c_str() == NULL) {
+  if (scoped_name_chars.c_str() == nullptr) {
       LOG(severity) << message << ": " << peer;
       env->ExceptionClear();
   } else {
@@ -813,7 +813,7 @@
       return thread;
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 void ThreadList::SuspendAllForDebugger() {
@@ -865,7 +865,7 @@
 
   // The debugger thread must not suspend itself due to debugger activity!
   Thread* debug_thread = Dbg::GetDebugThread();
-  CHECK(debug_thread != NULL);
+  CHECK(debug_thread != nullptr);
   CHECK(self != debug_thread);
   CHECK_NE(self->GetState(), kRunnable);
   Locks::mutator_lock_->AssertNotHeld(self);
@@ -1142,7 +1142,7 @@
 
   // Clear the TLS data, so that the underlying native thread is recognizably detached.
   // (It may wish to reattach later.)
-  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, NULL), "detach self");
+  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
 
   // Signal that a thread just detached.
   MutexLock mu(nullptr, *Locks::thread_list_lock_);
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index fa747b8..0f094cc 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -68,7 +68,7 @@
 
 
   // Suspend a thread using a peer, typically used by the debugger. Returns the thread on success,
-  // else NULL. The peer is used to identify the thread to avoid races with the thread terminating.
+  // else null. The peer is used to identify the thread to avoid races with the thread terminating.
   // If the thread should be suspended then value of request_suspension should be true otherwise
   // the routine will wait for a previous suspend request. If the suspension times out then *timeout
   // is set to true.
@@ -79,7 +79,7 @@
                      Locks::thread_suspend_count_lock_);
 
   // Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the
-  // thread on success else NULL. The thread id is used to identify the thread to avoid races with
+  // thread on success else null. The thread id is used to identify the thread to avoid races with
   // the thread terminating. Note that as thread ids are recycled this may not suspend the expected
   // thread, that may be terminating. If the suspension times out then *timeout is set to true.
   Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
@@ -164,7 +164,7 @@
       LOCKS_EXCLUDED(Locks::thread_list_lock_,
                      Locks::thread_suspend_count_lock_);
 
-  void AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2 = NULL)
+  void AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr)
       LOCKS_EXCLUDED(Locks::thread_list_lock_,
                      Locks::thread_suspend_count_lock_);
 
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 2a82285..ce76eae 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -158,7 +158,7 @@
     --waiting_count_;
   }
 
-  // We are shutting down, return nullptr to tell the worker thread to stop looping.
+  // We are shutting down, return null to tell the worker thread to stop looping.
   return nullptr;
 }
 
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 79b57af..0557708 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -112,7 +112,7 @@
   // get a task to run, blocks if there are no tasks left
   virtual Task* GetTask(Thread* self);
 
-  // Try to get a task, returning NULL if there is none available.
+  // Try to get a task, returning null if there is none available.
   Task* TryGetTask(Thread* self);
   Task* TryGetTaskLocked() EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_);
 
@@ -166,7 +166,7 @@
   virtual ~WorkStealingWorker();
 
   bool IsRunningTask() const {
-    return task_ != NULL;
+    return task_ != nullptr;
   }
 
  protected:
diff --git a/runtime/trace.h b/runtime/trace.h
index b8329ff..1ecd4d8 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -172,7 +172,7 @@
   void WriteToBuf(const uint8_t* src, size_t src_size)
       EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
 
-  // Singleton instance of the Trace or NULL when no method tracing is active.
+  // Singleton instance of the Trace or null when no method tracing is active.
   static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);
 
   // The default profiler clock source.
@@ -184,7 +184,7 @@
   // Used to remember an unused stack trace to avoid re-allocation during sampling.
   static std::unique_ptr<std::vector<mirror::ArtMethod*>> temp_stack_trace_;
 
-  // File to write trace data out to, NULL if direct to ddms.
+  // File to write trace data out to, null if direct to ddms.
   std::unique_ptr<File> trace_file_;
 
   // Buffer to store trace data.
diff --git a/runtime/utils.cc b/runtime/utils.cc
index a303aa4..ec7131d 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -60,7 +60,7 @@
 pid_t GetTid() {
 #if defined(__APPLE__)
   uint64_t owner;
-  CHECK_PTHREAD_CALL(pthread_threadid_np, (NULL, &owner), __FUNCTION__);  // Requires Mac OS 10.6
+  CHECK_PTHREAD_CALL(pthread_threadid_np, (nullptr, &owner), __FUNCTION__);  // Requires Mac OS 10.6
   return owner;
 #elif defined(__BIONIC__)
   return gettid();
@@ -205,7 +205,7 @@
 }
 
 std::string GetIsoDate() {
-  time_t now = time(NULL);
+  time_t now = time(nullptr);
   tm tmbuf;
   tm* ptm = localtime_r(&now, &tmbuf);
   return StringPrintf("%04d-%02d-%02d %02d:%02d:%02d",
@@ -220,7 +220,7 @@
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_nsec / UINT64_C(1000000);
 #else  // __APPLE__
   timeval now;
-  gettimeofday(&now, NULL);
+  gettimeofday(&now, nullptr);
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_usec / UINT64_C(1000);
 #endif
 }
@@ -232,7 +232,7 @@
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
 #else  // __APPLE__
   timeval now;
-  gettimeofday(&now, NULL);
+  gettimeofday(&now, nullptr);
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_usec;
 #endif
 }
@@ -244,7 +244,7 @@
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec;
 #else  // __APPLE__
   timeval now;
-  gettimeofday(&now, NULL);
+  gettimeofday(&now, nullptr);
   return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_usec * UINT64_C(1000);
 #endif
 }
@@ -264,7 +264,7 @@
   timespec tm;
   tm.tv_sec = 0;
   tm.tv_nsec = ns;
-  nanosleep(&tm, NULL);
+  nanosleep(&tm, nullptr);
 }
 
 void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts) {
@@ -276,7 +276,7 @@
 #else
     UNUSED(clock);
     timeval tv;
-    gettimeofday(&tv, NULL);
+    gettimeofday(&tv, nullptr);
     ts->tv_sec = tv.tv_sec;
     ts->tv_nsec = tv.tv_usec * 1000;
 #endif
@@ -301,14 +301,14 @@
 }
 
 std::string PrettyDescriptor(mirror::String* java_descriptor) {
-  if (java_descriptor == NULL) {
+  if (java_descriptor == nullptr) {
     return "null";
   }
   return PrettyDescriptor(java_descriptor->ToModifiedUtf8().c_str());
 }
 
 std::string PrettyDescriptor(mirror::Class* klass) {
-  if (klass == NULL) {
+  if (klass == nullptr) {
     return "null";
   }
   std::string temp;
@@ -365,7 +365,7 @@
 }
 
 std::string PrettyField(ArtField* f, bool with_type) {
-  if (f == NULL) {
+  if (f == nullptr) {
     return "null";
   }
   std::string result;
@@ -436,7 +436,7 @@
 
 std::string PrettyReturnType(const char* signature) {
   const char* return_type = strchr(signature, ')');
-  CHECK(return_type != NULL);
+  CHECK(return_type != nullptr);
   ++return_type;  // Skip ')'.
   return PrettyDescriptor(return_type);
 }
@@ -484,10 +484,10 @@
 }
 
 std::string PrettyTypeOf(mirror::Object* obj) {
-  if (obj == NULL) {
+  if (obj == nullptr) {
     return "null";
   }
-  if (obj->GetClass() == NULL) {
+  if (obj->GetClass() == nullptr) {
     return "(raw)";
   }
   std::string temp;
@@ -499,7 +499,7 @@
 }
 
 std::string PrettyClass(mirror::Class* c) {
-  if (c == NULL) {
+  if (c == nullptr) {
     return "null";
   }
   std::string result;
@@ -510,7 +510,7 @@
 }
 
 std::string PrettyClassAndClassLoader(mirror::Class* c) {
-  if (c == NULL) {
+  if (c == nullptr) {
     return "null";
   }
   std::string result;
@@ -1158,9 +1158,9 @@
   std::vector<std::string> fields;
   Split(stats, ' ', &fields);
   *state = fields[0][0];
-  *utime = strtoull(fields[11].c_str(), NULL, 10);
-  *stime = strtoull(fields[12].c_str(), NULL, 10);
-  *task_cpu = strtoull(fields[36].c_str(), NULL, 10);
+  *utime = strtoull(fields[11].c_str(), nullptr, 10);
+  *stime = strtoull(fields[12].c_str(), nullptr, 10);
+  *task_cpu = strtoull(fields[36].c_str(), nullptr, 10);
 }
 
 std::string GetSchedulerGroupName(pid_t tid) {
@@ -1358,7 +1358,7 @@
     // into "futex_wait_queue_me+0xcd/0x110".
     const char* text = kernel_stack_frames[i].c_str();
     const char* close_bracket = strchr(text, ']');
-    if (close_bracket != NULL) {
+    if (close_bracket != nullptr) {
       text = close_bracket + 2;
     }
     os << prefix;
@@ -1373,7 +1373,7 @@
 
 const char* GetAndroidRoot() {
   const char* android_root = getenv("ANDROID_ROOT");
-  if (android_root == NULL) {
+  if (android_root == nullptr) {
     if (OS::DirectoryExists("/system")) {
       android_root = "/system";
     } else {
@@ -1401,7 +1401,7 @@
 
 const char* GetAndroidDataSafe(std::string* error_msg) {
   const char* android_data = getenv("ANDROID_DATA");
-  if (android_data == NULL) {
+  if (android_data == nullptr) {
     if (OS::DirectoryExists("/data")) {
       android_data = "/data";
     } else {
@@ -1563,7 +1563,7 @@
     CHECK(arg_str != nullptr) << i;
     args.push_back(arg_str);
   }
-  args.push_back(NULL);
+  args.push_back(nullptr);
 
   // fork and exec
   pid_t pid = fork();
diff --git a/runtime/utils.h b/runtime/utils.h
index 6708c67..853fa08 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -508,7 +508,7 @@
 
 // Find $ANDROID_DATA, /data, or abort.
 const char* GetAndroidData();
-// Find $ANDROID_DATA, /data, or return nullptr.
+// Find $ANDROID_DATA, /data, or return null.
 const char* GetAndroidDataSafe(std::string* error_msg);
 
 // Returns the dalvik-cache location, with subdir appended. Returns the empty string if the cache
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 6ccbd13..ae24b77 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -106,7 +106,7 @@
 
 TEST_F(UtilsTest, PrettyTypeOf) {
   ScopedObjectAccess soa(Thread::Current());
-  EXPECT_EQ("null", PrettyTypeOf(NULL));
+  EXPECT_EQ("null", PrettyTypeOf(nullptr));
 
   StackHandleScope<2> hs(soa.Self());
   Handle<mirror::String> s(hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "")));
@@ -116,7 +116,7 @@
   EXPECT_EQ("short[]", PrettyTypeOf(a.Get()));
 
   mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;");
-  ASSERT_TRUE(c != NULL);
+  ASSERT_TRUE(c != nullptr);
   mirror::Object* o = mirror::ObjectArray<mirror::String>::Alloc(soa.Self(), c, 0);
   EXPECT_EQ("java.lang.String[]", PrettyTypeOf(o));
   EXPECT_EQ("java.lang.Class<java.lang.String[]>", PrettyTypeOf(o->GetClass()));
@@ -124,25 +124,25 @@
 
 TEST_F(UtilsTest, PrettyClass) {
   ScopedObjectAccess soa(Thread::Current());
-  EXPECT_EQ("null", PrettyClass(NULL));
+  EXPECT_EQ("null", PrettyClass(nullptr));
   mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;");
-  ASSERT_TRUE(c != NULL);
+  ASSERT_TRUE(c != nullptr);
   mirror::Object* o = mirror::ObjectArray<mirror::String>::Alloc(soa.Self(), c, 0);
   EXPECT_EQ("java.lang.Class<java.lang.String[]>", PrettyClass(o->GetClass()));
 }
 
 TEST_F(UtilsTest, PrettyClassAndClassLoader) {
   ScopedObjectAccess soa(Thread::Current());
-  EXPECT_EQ("null", PrettyClassAndClassLoader(NULL));
+  EXPECT_EQ("null", PrettyClassAndClassLoader(nullptr));
   mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;");
-  ASSERT_TRUE(c != NULL);
+  ASSERT_TRUE(c != nullptr);
   mirror::Object* o = mirror::ObjectArray<mirror::String>::Alloc(soa.Self(), c, 0);
   EXPECT_EQ("java.lang.Class<java.lang.String[],null>", PrettyClassAndClassLoader(o->GetClass()));
 }
 
 TEST_F(UtilsTest, PrettyField) {
   ScopedObjectAccess soa(Thread::Current());
-  EXPECT_EQ("null", PrettyField(NULL));
+  EXPECT_EQ("null", PrettyField(nullptr));
 
   mirror::Class* java_lang_String = class_linker_->FindSystemClass(soa.Self(),
                                                                    "Ljava/lang/String;");
@@ -216,21 +216,21 @@
 TEST_F(UtilsTest, JniShortName_JniLongName) {
   ScopedObjectAccess soa(Thread::Current());
   mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;");
-  ASSERT_TRUE(c != NULL);
+  ASSERT_TRUE(c != nullptr);
   mirror::ArtMethod* m;
 
   m = c->FindVirtualMethod("charAt", "(I)C");
-  ASSERT_TRUE(m != NULL);
+  ASSERT_TRUE(m != nullptr);
   EXPECT_EQ("Java_java_lang_String_charAt", JniShortName(m));
   EXPECT_EQ("Java_java_lang_String_charAt__I", JniLongName(m));
 
   m = c->FindVirtualMethod("indexOf", "(Ljava/lang/String;I)I");
-  ASSERT_TRUE(m != NULL);
+  ASSERT_TRUE(m != nullptr);
   EXPECT_EQ("Java_java_lang_String_indexOf", JniShortName(m));
   EXPECT_EQ("Java_java_lang_String_indexOf__Ljava_lang_String_2I", JniLongName(m));
 
   m = c->FindDirectMethod("copyValueOf", "([CII)Ljava/lang/String;");
-  ASSERT_TRUE(m != NULL);
+  ASSERT_TRUE(m != nullptr);
   EXPECT_EQ("Java_java_lang_String_copyValueOf", JniShortName(m));
   EXPECT_EQ("Java_java_lang_String_copyValueOf___3CII", JniLongName(m));
 }
diff --git a/runtime/verifier/dex_gc_map.cc b/runtime/verifier/dex_gc_map.cc
index cd0b137..c435f9f 100644
--- a/runtime/verifier/dex_gc_map.cc
+++ b/runtime/verifier/dex_gc_map.cc
@@ -49,7 +49,7 @@
   if (error_if_not_present) {
     LOG(ERROR) << "Didn't find reference bit map for dex_pc " << dex_pc;
   }
-  return NULL;
+  return nullptr;
 }
 
 }  // namespace verifier
diff --git a/runtime/verifier/dex_gc_map.h b/runtime/verifier/dex_gc_map.h
index d77ea65..03a7821 100644
--- a/runtime/verifier/dex_gc_map.h
+++ b/runtime/verifier/dex_gc_map.h
@@ -39,7 +39,7 @@
 class DexPcToReferenceMap {
  public:
   explicit DexPcToReferenceMap(const uint8_t* data) : data_(data) {
-    CHECK(data_ != NULL);
+    CHECK(data_ != nullptr);
   }
 
   // The total size of the reference bit map including header.
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index cd414c2..2914b7c 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -244,11 +244,11 @@
   bool HasFailures() const;
   const RegType& ResolveCheckedClass(uint32_t class_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  // Returns the method of a quick invoke or nullptr if it cannot be found.
+  // Returns the method of a quick invoke or null if it cannot be found.
   mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
                                            bool is_range, bool allow_failure)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  // Returns the access field of a quick field access (iget/iput-quick) or nullptr
+  // Returns the access field of a quick field access (iget/iput-quick) or null
   // if it cannot be found.
   ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -585,7 +585,7 @@
    * Widening conversions on integers and references are allowed, but
    * narrowing conversions are not.
    *
-   * Returns the resolved method on success, nullptr on failure (with *failure
+   * Returns the resolved method on success, null on failure (with *failure
    * set appropriately).
    */
   mirror::ArtMethod* VerifyInvocationArgs(const Instruction* inst,
@@ -686,7 +686,7 @@
   // The dex PC of a FindLocksAtDexPc request, -1 otherwise.
   uint32_t interesting_dex_pc_;
   // The container into which FindLocksAtDexPc should write the registers containing held locks,
-  // nullptr if we're not doing FindLocksAtDexPc.
+  // null if we're not doing FindLocksAtDexPc.
   std::vector<uint32_t>* monitor_enter_dex_pcs_;
 
   // The types of any error that occurs.
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index f67adc1..3994536 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -31,7 +31,7 @@
  protected:
   void VerifyClass(const std::string& descriptor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    ASSERT_TRUE(descriptor != NULL);
+    ASSERT_FALSE(descriptor.empty());
     Thread* self = Thread::Current();
     mirror::Class* klass = class_linker_->FindSystemClass(self, descriptor.c_str());
 
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index e4d2c3e..d08c937 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -707,7 +707,7 @@
   UnresolvedUninitializedRefType(const std::string& descriptor,
                                  uint32_t allocation_pc, uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : UninitializedType(NULL, descriptor, allocation_pc, cache_id) {
+      : UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
     }
@@ -752,7 +752,7 @@
   UnresolvedUninitializedThisRefType(const std::string& descriptor,
                                      uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : UninitializedType(NULL, descriptor, 0, cache_id) {
+      : UninitializedType(nullptr, descriptor, 0, cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
     }
@@ -808,7 +808,7 @@
  public:
   UnresolvedType(const std::string& descriptor, uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : RegType(NULL, descriptor, cache_id) {}
+      : RegType(nullptr, descriptor, cache_id) {}
 
   bool IsNonZeroReferenceTypes() const OVERRIDE;
 };
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index 9024a7d..b6f253b 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -30,7 +30,7 @@
 inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
   DCHECK_LT(id, entries_.size());
   const RegType* result = entries_[id];
-  DCHECK(result != NULL);
+  DCHECK(result != nullptr);
   return *result;
 }
 
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index f57f9c4..a803df8 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -78,7 +78,7 @@
 jmethodID WellKnownClasses::java_lang_reflect_Proxy_invoke;
 jmethodID WellKnownClasses::java_lang_Runtime_nativeLoad;
 jmethodID WellKnownClasses::java_lang_Short_valueOf;
-jmethodID WellKnownClasses::java_lang_System_runFinalization = NULL;
+jmethodID WellKnownClasses::java_lang_System_runFinalization = nullptr;
 jmethodID WellKnownClasses::java_lang_Thread_init;
 jmethodID WellKnownClasses::java_lang_Thread_run;
 jmethodID WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException;
@@ -123,7 +123,7 @@
 
 static jclass CacheClass(JNIEnv* env, const char* jni_class_name) {
   ScopedLocalRef<jclass> c(env, env->FindClass(jni_class_name));
-  if (c.get() == NULL) {
+  if (c.get() == nullptr) {
     LOG(FATAL) << "Couldn't find class: " << jni_class_name;
   }
   return reinterpret_cast<jclass>(env->NewGlobalRef(c.get()));
@@ -134,7 +134,7 @@
   jfieldID fid = (is_static ?
                   env->GetStaticFieldID(c, name, signature) :
                   env->GetFieldID(c, name, signature));
-  if (fid == NULL) {
+  if (fid == nullptr) {
     ScopedObjectAccess soa(env);
     std::ostringstream os;
     WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail);
@@ -149,7 +149,7 @@
   jmethodID mid = (is_static ?
                    env->GetStaticMethodID(c, name, signature) :
                    env->GetMethodID(c, name, signature));
-  if (mid == NULL) {
+  if (mid == nullptr) {
     ScopedObjectAccess soa(env);
     std::ostringstream os;
     WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail);
diff --git a/runtime/zip_archive.cc b/runtime/zip_archive.cc
index ffab674..88c1f69 100644
--- a/runtime/zip_archive.cc
+++ b/runtime/zip_archive.cc
@@ -56,7 +56,7 @@
   name += " extracted in memory from ";
   name += zip_filename;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous(name.c_str(),
-                                                   NULL, GetUncompressedLength(),
+                                                   nullptr, GetUncompressedLength(),
                                                    PROT_READ | PROT_WRITE, false, false,
                                                    error_msg));
   if (map.get() == nullptr) {
diff --git a/runtime/zip_archive.h b/runtime/zip_archive.h
index 865af51..717eb8c 100644
--- a/runtime/zip_archive.h
+++ b/runtime/zip_archive.h
@@ -57,7 +57,7 @@
 
 class ZipArchive {
  public:
-  // return new ZipArchive instance on success, NULL on error.
+  // return new ZipArchive instance on success, null on error.
   static ZipArchive* Open(const char* filename, std::string* error_msg);
   static ZipArchive* OpenFromFd(int fd, const char* filename, std::string* error_msg);
 
diff --git a/runtime/zip_archive_test.cc b/runtime/zip_archive_test.cc
index 70a4dda..aded30c 100644
--- a/runtime/zip_archive_test.cc
+++ b/runtime/zip_archive_test.cc
@@ -42,11 +42,11 @@
   ScratchFile tmp;
   ASSERT_NE(-1, tmp.GetFd());
   std::unique_ptr<File> file(new File(tmp.GetFd(), tmp.GetFilename(), false));
-  ASSERT_TRUE(file.get() != NULL);
+  ASSERT_TRUE(file.get() != nullptr);
   bool success = zip_entry->ExtractToFile(*file, &error_msg);
   ASSERT_TRUE(success) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  file.reset(NULL);
+  file.reset(nullptr);
 
   uint32_t computed_crc = crc32(0L, Z_NULL, 0);
   int fd = open(tmp.GetFilename().c_str(), O_RDONLY);
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index e61fcd8..0359ed3 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -51,7 +51,7 @@
   // Unclaim the signal and restore the old action.
   void Unclaim(int signal) {
     claimed_ = false;
-    sigaction(signal, &action_, NULL);        // Restore old action.
+    sigaction(signal, &action_, nullptr);        // Restore old action.
   }
 
   // Get the action associated with this signal.
@@ -133,14 +133,14 @@
 
   const struct sigaction& action = user_sigactions[sig].GetAction();
   if (user_sigactions[sig].OldStyle()) {
-    if (action.sa_handler != NULL) {
+    if (action.sa_handler != nullptr) {
       action.sa_handler(sig);
     } else {
       signal(sig, SIG_DFL);
       raise(sig);
     }
   } else {
-    if (action.sa_sigaction != NULL) {
+    if (action.sa_sigaction != nullptr) {
       action.sa_sigaction(sig, info, context);
     } else {
       signal(sig, SIG_DFL);
@@ -172,10 +172,10 @@
   if (signal > 0 && signal < _NSIG && user_sigactions[signal].IsClaimed() &&
       (new_action == nullptr || new_action->sa_handler != SIG_DFL)) {
     struct sigaction saved_action = user_sigactions[signal].GetAction();
-    if (new_action != NULL) {
+    if (new_action != nullptr) {
       user_sigactions[signal].SetAction(*new_action, false);
     }
-    if (old_action != NULL) {
+    if (old_action != nullptr) {
       *old_action = saved_action;
     }
     return 0;
@@ -242,7 +242,7 @@
 extern "C" int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bionic_old_set) {
   const sigset_t* new_set_ptr = bionic_new_set;
   sigset_t tmpset;
-  if (bionic_new_set != NULL) {
+  if (bionic_new_set != nullptr) {
     tmpset = *bionic_new_set;
 
     if (how == SIG_BLOCK) {
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 544cbc5..b23b97b 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -25,7 +25,7 @@
 #error test code compiled without NDEBUG
 #endif
 
-static JavaVM* jvm = NULL;
+static JavaVM* jvm = nullptr;
 
 extern "C" JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void *) {
   assert(vm != nullptr);
@@ -38,7 +38,7 @@
   assert(jvm != nullptr);
 
   JNIEnv* env = nullptr;
-  JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, NULL };
+  JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, nullptr };
   int attach_result = jvm->AttachCurrentThread(&env, &args);
   assert(attach_result == 0);
 
diff --git a/test/004-SignalTest/signaltest.cc b/test/004-SignalTest/signaltest.cc
index 876d27e..1414715 100644
--- a/test/004-SignalTest/signaltest.cc
+++ b/test/004-SignalTest/signaltest.cc
@@ -89,7 +89,7 @@
 }
 
 // Prevent the compiler being a smart-alec and optimizing out the assignment
-// to nullptr.
+// to null.
 char *go_away_compiler = nullptr;
 
 extern "C" JNIEXPORT jint JNICALL Java_Main_testSignal(JNIEnv*, jclass) {